Merge branch 'master' into webapps-mio

Conflicts:
	Cargo.lock
	parity/main.rs

commit 13c25c5d49

Cargo.lock (generated, 79 lines changed)
@ -2,6 +2,7 @@
|
||||
name = "parity"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"bincode 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.63 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)",
|
||||
"daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -9,6 +10,9 @@ dependencies = [
|
||||
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethcore 1.1.0",
|
||||
"ethcore-devtools 1.1.0",
|
||||
"ethcore-ipc 1.1.0",
|
||||
"ethcore-ipc-codegen 1.1.0",
|
||||
"ethcore-ipc-nano 1.1.0",
|
||||
"ethcore-rpc 1.1.0",
|
||||
"ethcore-util 1.1.0",
|
||||
"ethcore-webapp 1.1.0",
|
||||
@ -23,6 +27,9 @@ dependencies = [
|
||||
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_codegen 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syntex 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
@ -63,6 +70,17 @@ dependencies = [
|
||||
"serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bincode"
|
||||
version = "0.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"byteorder 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "0.3.3"
|
||||
@ -78,6 +96,11 @@ name = "blastfig"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "bytes"
|
||||
version = "0.3.0"
|
||||
@ -237,6 +260,36 @@ dependencies = [
|
||||
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethcore-ipc"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"ethcore-devtools 1.1.0",
|
||||
"nanomsg 0.5.0 (git+https://github.com/ethcore/nanomsg.rs.git)",
|
||||
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethcore-ipc-codegen"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"aster 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethcore-ipc 1.1.0",
|
||||
"quasi 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quasi_codegen 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syntex 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syntex_syntax 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethcore-ipc-nano"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"ethcore-ipc 1.1.0",
|
||||
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"nanomsg 0.5.0 (git+https://github.com/ethcore/nanomsg.rs.git)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethcore-rpc"
|
||||
version = "1.1.0"
|
||||
@ -304,7 +357,7 @@ dependencies = [
|
||||
"jsonrpc-core 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-http-server 5.0.1 (git+https://github.com/debris/jsonrpc-http-server.git)",
|
||||
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"parity-status 0.1.6 (git+https://github.com/tomusdrw/parity-status.git)",
|
||||
"parity-status 0.1.7 (git+https://github.com/tomusdrw/parity-status.git)",
|
||||
"parity-wallet 0.1.1 (git+https://github.com/tomusdrw/parity-wallet.git)",
|
||||
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
|
||||
"url 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -494,7 +547,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "jsonrpc-http-server"
|
||||
version = "5.0.1"
|
||||
source = "git+https://github.com/debris/jsonrpc-http-server.git#239066b94660a1af24c8b2efc16e800f9c7cce18"
|
||||
source = "git+https://github.com/debris/jsonrpc-http-server.git#e728f2e080799b7a62b0b5cf5fa9d4ad65cd8c96"
|
||||
dependencies = [
|
||||
"hyper 0.9.0-mio (git+https://github.com/hyperium/hyper?branch=mio)",
|
||||
"jsonrpc-core 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -610,6 +663,24 @@ dependencies = [
|
||||
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nanomsg"
|
||||
version = "0.5.0"
|
||||
source = "git+https://github.com/ethcore/nanomsg.rs.git#9c81fb3b0f71714b173d0abf14bfd30addf8c7b1"
|
||||
dependencies = [
|
||||
"libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"nanomsg-sys 0.5.0 (git+https://github.com/ethcore/nanomsg.rs.git)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nanomsg-sys"
|
||||
version = "0.5.0"
|
||||
source = "git+https://github.com/ethcore/nanomsg.rs.git#9c81fb3b0f71714b173d0abf14bfd30addf8c7b1"
|
||||
dependencies = [
|
||||
"gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "net2"
|
||||
version = "0.2.23"
|
||||
@ -676,8 +747,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "parity-status"
|
||||
version = "0.1.6"
|
||||
source = "git+https://github.com/tomusdrw/parity-status.git#bbd45f5ccc6a0ccc9ed2c8b666b012844f9b89a8"
|
||||
version = "0.1.7"
|
||||
source = "git+https://github.com/tomusdrw/parity-status.git#5b7010eb7ecc38e80ab506902e083dc0dd48c43f"
|
||||
dependencies = [
|
||||
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
|
||||
]
|
||||
|
@ -8,6 +8,9 @@ build = "build.rs"
|
||||
|
||||
[build-dependencies]
|
||||
rustc_version = "0.1"
|
||||
syntex = "*"
|
||||
serde_codegen = "0.7.0"
|
||||
"ethcore-ipc-codegen" = { path = "ipc/codegen" }
|
||||
|
||||
[dependencies]
|
||||
log = "0.3"
|
||||
@ -30,6 +33,10 @@ ethcore-devtools = { path = "devtools" }
|
||||
ethcore-rpc = { path = "rpc", optional = true }
|
||||
ethcore-webapp = { path = "webapp", optional = true }
|
||||
semver = "0.2"
|
||||
ethcore-ipc-nano = { path = "ipc/nano" }
|
||||
"ethcore-ipc" = { path = "ipc/rpc" }
|
||||
bincode = "*"
|
||||
serde = "0.7.0"
|
||||
|
||||
[dependencies.hyper]
|
||||
version = "0.8"
|
||||
|
@ -19,7 +19,7 @@ First (if you don't already have it) get multirust:

- Linux:
```bash
curl -sf https://raw.githubusercontent.com/brson/multirust/master/quick-install.sh | sudo sh -s -- --yes
curl -sf https://raw.githubusercontent.com/brson/multirust/master/quick-install.sh | sh
```

- OSX with Homebrew:

build.rs (25 lines changed)
@ -15,11 +15,36 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

extern crate rustc_version;
extern crate syntex;
extern crate ethcore_ipc_codegen as codegen;
extern crate serde_codegen;

use std::env;
use std::path::Path;
use rustc_version::{version_meta, Channel};

fn main() {
	if let Channel::Nightly = version_meta().channel {
		println!("cargo:rustc-cfg=nightly");
	}

	let out_dir = env::var_os("OUT_DIR").unwrap();

	// ipc pass
	{
		let src = Path::new("parity/hypervisor/service.rs.in");
		let dst = Path::new(&out_dir).join("hypervisor_service_ipc.rs");
		let mut registry = syntex::Registry::new();
		codegen::register(&mut registry);
		registry.expand("", &src, &dst).unwrap();
	}

	// serde pass
	{
		let src = Path::new(&out_dir).join("hypervisor_service_ipc.rs");
		let dst = Path::new(&out_dir).join("hypervisor_service_cg.rs");
		let mut registry = syntex::Registry::new();
		serde_codegen::register(&mut registry);
		registry.expand("", &src, &dst).unwrap();
	}
}
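To make the two passes above easier to follow: the ipc pass expands the IPC attributes from parity/hypervisor/service.rs.in, the serde pass then expands the serialization derives, and the final file lands in OUT_DIR. A minimal sketch, not part of the diff, of how the crate would typically pull that generated file back in (the module name is illustrative):

```rust
// Hypothetical consumer of the build-script output; the path matches the
// `dst` written by the serde pass above, everything else is illustrative.
mod hypervisor_service {
    include!(concat!(env!("OUT_DIR"), "/hypervisor_service_cg.rs"));
}
```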
|
@ -12,11 +12,9 @@ RUN apt-get -y update && \
|
||||
# install multirust
|
||||
RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes
|
||||
ENV RUST_TARGETS="arm-unknown-linux-gnueabihf"
|
||||
# multirust override beta
|
||||
RUN multirust override beta
|
||||
|
||||
# multirust add arm--linux-gnuabhf toolchain
|
||||
RUN multirust add-target beta arm-unknown-linux-gnueabihf
|
||||
RUN multirust add-target stable arm-unknown-linux-gnueabihf
|
||||
|
||||
# show backtraces
|
||||
ENV RUST_BACKTRACE 1
|
||||
@ -41,7 +39,6 @@ RUN git clone https://github.com/ethcore/parity && \
|
||||
cat .cargo/config && \
|
||||
rustc -vV && \
|
||||
cargo -V && \
|
||||
cargo update && \
|
||||
cargo build --target arm-unknown-linux-gnueabihf --release --verbose && \
|
||||
ls /build/parity/target/arm-unknown-linux-gnueabihf/release/parity && \
|
||||
file /build/parity/target/arm-unknown-linux-gnueabihf/release/parity && \
|
||||
|
@ -27,7 +27,7 @@ use std::ptr;
|
||||
use sha3;
|
||||
use std::slice;
|
||||
use std::path::PathBuf;
|
||||
use std::io::{Read, Write, self};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::fs::{self, File};
|
||||
|
||||
pub const ETHASH_EPOCH_LENGTH: u64 = 30000;
|
||||
@ -51,7 +51,7 @@ pub struct ProofOfWork {
|
||||
/// Difficulty boundary
|
||||
pub value: H256,
|
||||
/// Mix
|
||||
pub mix_hash: H256
|
||||
pub mix_hash: H256,
|
||||
}
|
||||
|
||||
struct Node {
|
||||
@ -148,14 +148,16 @@ impl Light {
|
||||
|
||||
pub struct SeedHashCompute {
|
||||
prev_epoch: Cell<u64>,
|
||||
prev_seedhash: Cell<H256>
|
||||
prev_seedhash: Cell<H256>,
|
||||
}
|
||||
|
||||
impl SeedHashCompute {
|
||||
|
||||
#[inline]
|
||||
pub fn new() -> SeedHashCompute {
|
||||
SeedHashCompute { prev_epoch: Cell::new(0), prev_seedhash: Cell::new([0u8; 32]) }
|
||||
SeedHashCompute {
|
||||
prev_epoch: Cell::new(0),
|
||||
prev_seedhash: Cell::new([0u8; 32]),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@ -181,7 +183,7 @@ impl SeedHashCompute {
|
||||
|
||||
#[inline]
|
||||
pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 {
|
||||
for _ in start_epoch .. end_epoch {
|
||||
for _ in start_epoch..end_epoch {
|
||||
unsafe { sha3::sha3_256(hash[..].as_mut_ptr(), 32, hash[..].as_ptr(), 32) };
|
||||
}
|
||||
hash
|
||||
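For context on the loop above: the ethash seed hash for epoch N is sha3-256 iterated N times over 32 zero bytes, and `resume_compute_seedhash` lets a cached value carry on from an earlier epoch instead of restarting. A hedged usage sketch (epoch numbers illustrative; `H256` here is the `[u8; 32]` alias from compute.rs):

```rust
let seed_epoch_5 = SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, 5);
// resuming from the cached epoch avoids re-hashing from zero
let seed_epoch_7 = SeedHashCompute::resume_compute_seedhash(seed_epoch_5, 5, 7);
```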
@ -254,7 +256,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64
|
||||
panic!("Unaligned full size");
|
||||
}
|
||||
// pack hash and nonce together into first 40 bytes of s_mix
|
||||
let mut s_mix: [Node; MIX_NODES + 1] = [ Node::default(), Node::default(), Node::default() ];
|
||||
let mut s_mix: [Node; MIX_NODES + 1] = [Node::default(), Node::default(), Node::default()];
|
||||
unsafe { ptr::copy_nonoverlapping(header_hash.as_ptr(), s_mix.get_unchecked_mut(0).bytes.as_mut_ptr(), 32) };
|
||||
unsafe { ptr::copy_nonoverlapping(mem::transmute(&nonce), s_mix.get_unchecked_mut(0).bytes[32..].as_mut_ptr(), 8) };
|
||||
|
||||
@ -348,7 +350,7 @@ fn light_new(block_number: u64) -> Light {
|
||||
let idx = *nodes.get_unchecked_mut(i).as_words().get_unchecked(0) as usize % num_nodes;
|
||||
let mut data = nodes.get_unchecked((num_nodes - 1 + i) % num_nodes).clone();
|
||||
for w in 0..NODE_WORDS {
|
||||
*data.as_words_mut().get_unchecked_mut(w) ^= *nodes.get_unchecked(idx).as_words().get_unchecked(w) ;
|
||||
*data.as_words_mut().get_unchecked_mut(w) ^= *nodes.get_unchecked(idx).as_words().get_unchecked(w);
|
||||
}
|
||||
sha3_512(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes);
|
||||
}
|
||||
@ -362,7 +364,7 @@ fn light_new(block_number: u64) -> Light {
|
||||
}
|
||||
}
|
||||
|
||||
static CHARS: &'static[u8] = b"0123456789abcdef";
|
||||
static CHARS: &'static [u8] = b"0123456789abcdef";
|
||||
fn to_hex(bytes: &[u8]) -> String {
|
||||
let mut v = Vec::with_capacity(bytes.len() * 2);
|
||||
for &byte in bytes.iter() {
|
||||
@ -370,9 +372,7 @@ fn to_hex(bytes: &[u8]) -> String {
|
||||
v.push(CHARS[(byte & 0xf) as usize]);
|
||||
}
|
||||
|
||||
unsafe {
|
||||
String::from_utf8_unchecked(v)
|
||||
}
|
||||
unsafe { String::from_utf8_unchecked(v) }
|
||||
}
|
||||
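The helper above maps each byte to two nibbles looked up in `CHARS`, high nibble first; an illustrative check (not part of the diff):

```rust
assert_eq!(to_hex(&[0xf5, 0x7e, 0x6f]), "f57e6f");
assert_eq!(to_hex(&[0x00, 0x0a]), "000a");
```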
|
||||
#[test]
|
||||
@ -403,7 +403,7 @@ fn test_get_data_size() {
|
||||
#[test]
|
||||
fn test_difficulty_test() {
|
||||
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
|
||||
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d ];
|
||||
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d];
|
||||
let nonce = 0xd7b3ac70a301a249;
|
||||
let boundary_good = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84];
|
||||
assert_eq!(quick_get_difficulty(&hash, nonce, &mix_hash)[..], boundary_good[..]);
|
||||
@ -414,7 +414,7 @@ fn test_difficulty_test() {
|
||||
#[test]
|
||||
fn test_light_compute() {
|
||||
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
|
||||
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d ];
|
||||
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d];
|
||||
let boundary = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84];
|
||||
let nonce = 0xd7b3ac70a301a249;
|
||||
// difficulty = 0x085657254bd9u64;
|
||||
|
@ -24,7 +24,7 @@ mod compute;
|
||||
|
||||
use std::mem;
|
||||
use compute::Light;
|
||||
pub use compute::{SeedHashCompute, quick_get_difficulty, H256, ProofOfWork, ETHASH_EPOCH_LENGTH};
|
||||
pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty};
|
||||
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
@ -76,7 +76,7 @@ impl EthashManager {
|
||||
lights.recent.clone()
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
},
|
||||
};
|
||||
match light {
|
||||
None => {
|
||||
@ -95,7 +95,7 @@ impl EthashManager {
|
||||
lights.prev = mem::replace(&mut lights.recent, Some(light.clone()));
|
||||
light
|
||||
}
|
||||
Some(light) => light
|
||||
Some(light) => light,
|
||||
}
|
||||
};
|
||||
light.compute(header_hash, nonce)
|
||||
|
@ -127,6 +127,7 @@ impl Client<CanonVerifier> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the path for the databases given the root path and information on the databases.
|
||||
pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> PathBuf {
|
||||
let mut dir = path.to_path_buf();
|
||||
dir.push(H64::from(genesis_hash).hex());
|
||||
@ -136,6 +137,7 @@ pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H25
|
||||
dir
|
||||
}
|
||||
|
||||
/// Append a path element to the given path and return the string.
|
||||
pub fn append_path(path: &Path, item: &str) -> String {
|
||||
let mut p = path.to_path_buf();
|
||||
p.push(item);
|
||||
|
@ -48,6 +48,8 @@ pub struct TestBlockChainClient {
|
||||
pub difficulty: RwLock<U256>,
|
||||
/// Balances.
|
||||
pub balances: RwLock<HashMap<Address, U256>>,
|
||||
/// Nonces.
|
||||
pub nonces: RwLock<HashMap<Address, U256>>,
|
||||
/// Storage.
|
||||
pub storage: RwLock<HashMap<(Address, H256), H256>>,
|
||||
/// Code.
|
||||
@ -90,6 +92,7 @@ impl TestBlockChainClient {
|
||||
last_hash: RwLock::new(H256::new()),
|
||||
difficulty: RwLock::new(From::from(0)),
|
||||
balances: RwLock::new(HashMap::new()),
|
||||
nonces: RwLock::new(HashMap::new()),
|
||||
storage: RwLock::new(HashMap::new()),
|
||||
code: RwLock::new(HashMap::new()),
|
||||
execution_result: RwLock::new(None),
|
||||
@ -116,6 +119,11 @@ impl TestBlockChainClient {
|
||||
self.balances.write().unwrap().insert(address, balance);
|
||||
}
|
||||
|
||||
/// Set nonce of account `address` to `nonce`.
|
||||
pub fn set_nonce(&self, address: Address, nonce: U256) {
|
||||
self.nonces.write().unwrap().insert(address, nonce);
|
||||
}
|
||||
|
||||
/// Set `code` at `address`.
|
||||
pub fn set_code(&self, address: Address, code: Bytes) {
|
||||
self.code.write().unwrap().insert(address, code);
|
||||
@ -157,6 +165,8 @@ impl TestBlockChainClient {
|
||||
EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => {
|
||||
let mut txs = RlpStream::new_list(1);
|
||||
let keypair = KeyPair::create().unwrap();
|
||||
// Update nonces value
|
||||
self.nonces.write().unwrap().insert(keypair.address(), U256::one());
|
||||
let tx = Transaction {
|
||||
action: Action::Create,
|
||||
value: U256::from(100),
|
||||
@ -222,8 +232,8 @@ impl BlockChainClient for TestBlockChainClient {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
fn nonce(&self, _address: &Address) -> U256 {
|
||||
U256::zero()
|
||||
fn nonce(&self, address: &Address) -> U256 {
|
||||
self.nonces.read().unwrap().get(address).cloned().unwrap_or_else(U256::zero)
|
||||
}
|
||||
|
||||
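With the change above the test client consults its `nonces` map (seeded via the new `set_nonce`) instead of always answering zero. A hedged sketch of the resulting behaviour (address value illustrative):

```rust
let client = TestBlockChainClient::new();
let addr = Address::from(10);
assert_eq!(client.nonce(&addr), U256::zero());   // unknown accounts default to zero
client.set_nonce(addr.clone(), U256::from(5));
assert_eq!(client.nonce(&addr), U256::from(5));  // primed value is returned
```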
fn code(&self, address: &Address) -> Option<Bytes> {
|
||||
|
@ -25,6 +25,9 @@ use syntax::ast::{
|
||||
PatKind,
|
||||
FunctionRetTy,
|
||||
Ty,
|
||||
TraitRef,
|
||||
Ident,
|
||||
Generics,
|
||||
};
|
||||
|
||||
use syntax::ast;
|
||||
@ -33,6 +36,9 @@ use syntax::ext::base::{Annotatable, ExtCtxt};
|
||||
use syntax::ext::build::AstBuilder;
|
||||
use syntax::ptr::P;
|
||||
|
||||
use super::typegen;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub struct Error;
|
||||
|
||||
const RESERVED_MESSAGE_IDS: u16 = 16;
|
||||
@ -54,15 +60,15 @@ pub fn expand_ipc_implementation(
|
||||
|
||||
let builder = aster::AstBuilder::new().span(span);
|
||||
|
||||
let (impl_item, dispatches) = match implement_interface(cx, &builder, &item, push) {
|
||||
Ok((item, dispatches)) => (item, dispatches),
|
||||
let interface_map = match implement_interface(cx, &builder, &item, push) {
|
||||
Ok(interface_map) => interface_map,
|
||||
Err(Error) => { return; }
|
||||
};
|
||||
|
||||
push_client(cx, &builder, &item, &dispatches, push);
|
||||
push_client(cx, &builder, &interface_map, push);
|
||||
push_handshake_struct(cx, push);
|
||||
|
||||
push(Annotatable::Item(impl_item))
|
||||
push(Annotatable::Item(interface_map.item));
|
||||
}
|
||||
|
||||
fn push_handshake_struct(cx: &ExtCtxt, push: &mut FnMut(Annotatable)) {
|
||||
@ -89,9 +95,9 @@ fn push_invoke_signature_aster(
|
||||
builder: &aster::AstBuilder,
|
||||
implement: &ImplItem,
|
||||
signature: &MethodSig,
|
||||
replacements: &HashMap<String, P<Ty>>,
|
||||
push: &mut FnMut(Annotatable),
|
||||
) -> Dispatch {
|
||||
|
||||
let inputs = &signature.decl.inputs;
|
||||
let (input_type_name, input_arg_names, input_arg_tys) = if inputs.len() > 0 {
|
||||
let first_field_name = field_name(builder, &inputs[0]).name.as_str();
|
||||
@ -102,19 +108,27 @@ fn push_invoke_signature_aster(
|
||||
|
||||
let mut arg_names = Vec::new();
|
||||
let mut arg_tys = Vec::new();
|
||||
|
||||
let arg_name = format!("{}", field_name(builder, &inputs[skip-1]).name);
|
||||
let arg_ty = inputs[skip-1].ty.clone();
|
||||
|
||||
let mut tree = builder.item()
|
||||
.attr().word("derive(Serialize, Deserialize)")
|
||||
.attr().word("allow(non_camel_case_types)")
|
||||
.struct_(name_str.as_str())
|
||||
.field(arg_name.as_str()).ty().build(arg_ty.clone());
|
||||
.field(arg_name.as_str()).ty()
|
||||
.build(typegen::argument_replacement(builder, replacements, &arg_ty).unwrap_or(arg_ty.clone()));
|
||||
|
||||
arg_names.push(arg_name);
|
||||
arg_tys.push(arg_ty.clone());
|
||||
arg_tys.push(arg_ty);
|
||||
for arg in inputs.iter().skip(skip) {
|
||||
let arg_name = format!("{}", field_name(builder, &arg));
|
||||
let arg_ty = arg.ty.clone();
|
||||
tree = tree.field(arg_name.as_str()).ty().build(arg_ty.clone());
|
||||
|
||||
let mut arg_ty = arg.ty.clone();
|
||||
arg_ty = typegen::argument_replacement(builder, replacements, &arg_ty).unwrap_or(arg_ty);
|
||||
|
||||
tree = tree.field(arg_name.as_str()).ty()
|
||||
.build(typegen::argument_replacement(builder, replacements, &arg_ty).unwrap_or(arg_ty.clone()));
|
||||
arg_names.push(arg_name);
|
||||
arg_tys.push(arg_ty);
|
||||
}
|
||||
@ -127,7 +141,7 @@ fn push_invoke_signature_aster(
|
||||
(None, vec![], vec![])
|
||||
};
|
||||
|
||||
let (return_type_name, return_type_ty) = match signature.decl.output {
|
||||
let return_type_ty = match signature.decl.output {
|
||||
FunctionRetTy::Ty(ref ty) => {
|
||||
let name_str = format!("{}_output", implement.ident.name.as_str());
|
||||
let tree = builder.item()
|
||||
@ -136,9 +150,9 @@ fn push_invoke_signature_aster(
|
||||
.struct_(name_str.as_str())
|
||||
.field(format!("payload")).ty().build(ty.clone());
|
||||
push(Annotatable::Item(tree.build()));
|
||||
(Some(name_str.to_owned()), Some(ty.clone()))
|
||||
Some(ty.clone())
|
||||
}
|
||||
_ => (None, None)
|
||||
_ => None
|
||||
};
|
||||
|
||||
Dispatch {
|
||||
@ -146,7 +160,6 @@ fn push_invoke_signature_aster(
|
||||
input_type_name: input_type_name,
|
||||
input_arg_names: input_arg_names,
|
||||
input_arg_tys: input_arg_tys,
|
||||
return_type_name: return_type_name,
|
||||
return_type_ty: return_type_ty,
|
||||
}
|
||||
}
|
||||
@ -156,7 +169,6 @@ struct Dispatch {
|
||||
input_type_name: Option<String>,
|
||||
input_arg_names: Vec<String>,
|
||||
input_arg_tys: Vec<P<Ty>>,
|
||||
return_type_name: Option<String>,
|
||||
return_type_ty: Option<P<Ty>>,
|
||||
}
|
||||
|
||||
@ -172,14 +184,19 @@ fn implement_dispatch_arm_invoke_stmt(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
dispatch: &Dispatch,
|
||||
replacements: &HashMap<String, P<Ty>>,
|
||||
) -> ast::Stmt
|
||||
{
|
||||
let function_name = builder.id(dispatch.function_name.as_str());
|
||||
let output_type_id = builder.id(dispatch.return_type_name.clone().unwrap().as_str());
|
||||
|
||||
let input_args_exprs = dispatch.input_arg_names.iter().map(|ref arg_name| {
|
||||
let input_args_exprs = dispatch.input_arg_names.iter().enumerate().map(|(arg_index, arg_name)| {
|
||||
let arg_ident = builder.id(arg_name);
|
||||
if typegen::argument_replacement(builder, replacements, &dispatch.input_arg_tys[arg_index]).is_some() {
|
||||
quote_expr!(cx, input. $arg_ident .into())
|
||||
}
|
||||
else {
|
||||
quote_expr!(cx, input. $arg_ident)
|
||||
}
|
||||
}).collect::<Vec<P<ast::Expr>>>();
|
||||
|
||||
let ext_cx = &*cx;
|
||||
@ -198,10 +215,6 @@ fn implement_dispatch_arm_invoke_stmt(
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("serialize"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::BinOp(::syntax::parse::token::And)));
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&output_type_id, ext_cx).into_iter());
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Brace)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("payload"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Colon));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("self"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Dot));
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&function_name, ext_cx).into_iter());
|
||||
@ -213,7 +226,6 @@ fn implement_dispatch_arm_invoke_stmt(
|
||||
}
|
||||
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Brace)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Comma));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("bincode"), ::syntax::parse::token::ModName)));
|
||||
@ -236,6 +248,7 @@ fn implement_dispatch_arm_invoke(
|
||||
builder: &aster::AstBuilder,
|
||||
dispatch: &Dispatch,
|
||||
buffer: bool,
|
||||
replacements: &HashMap<String, P<Ty>>,
|
||||
) -> P<ast::Expr>
|
||||
{
|
||||
let deserialize_expr = if buffer {
|
||||
@ -246,7 +259,7 @@ fn implement_dispatch_arm_invoke(
|
||||
|
||||
let input_type_id = builder.id(dispatch.input_type_name.clone().unwrap().as_str());
|
||||
|
||||
let invoke_serialize_stmt = implement_dispatch_arm_invoke_stmt(cx, builder, dispatch);
|
||||
let invoke_serialize_stmt = implement_dispatch_arm_invoke_stmt(cx, builder, dispatch, replacements);
|
||||
quote_expr!(cx, {
|
||||
let input: $input_type_id = $deserialize_expr;
|
||||
$invoke_serialize_stmt
|
||||
@ -260,10 +273,11 @@ fn implement_dispatch_arm(
|
||||
index: u32,
|
||||
dispatch: &Dispatch,
|
||||
buffer: bool,
|
||||
replacements: &HashMap<String, P<Ty>>,
|
||||
) -> ast::Arm
|
||||
{
|
||||
let index_ident = builder.id(format!("{}", index + (RESERVED_MESSAGE_IDS as u32)).as_str());
|
||||
let invoke_expr = implement_dispatch_arm_invoke(cx, builder, dispatch, buffer);
|
||||
let invoke_expr = implement_dispatch_arm_invoke(cx, builder, dispatch, buffer, replacements);
|
||||
quote_arm!(cx, $index_ident => { $invoke_expr } )
|
||||
}
|
||||
|
||||
@ -272,37 +286,12 @@ fn implement_dispatch_arms(
|
||||
builder: &aster::AstBuilder,
|
||||
dispatches: &[Dispatch],
|
||||
buffer: bool,
|
||||
replacements: &HashMap<String, P<Ty>>,
|
||||
) -> Vec<ast::Arm>
|
||||
{
|
||||
let mut index = -1;
|
||||
dispatches.iter()
|
||||
.map(|dispatch| { index = index + 1; implement_dispatch_arm(cx, builder, index as u32, dispatch, buffer) }).collect()
|
||||
}
|
||||
|
||||
/// generates client type for specified server type
|
||||
/// for say `Service` it generates `ServiceClient`
|
||||
fn push_client_struct(cx: &ExtCtxt, builder: &aster::AstBuilder, item: &Item, push: &mut FnMut(Annotatable)) {
|
||||
let (_, client_ident) = get_item_idents(builder, item);
|
||||
let client_struct_item = quote_item!(cx,
|
||||
pub struct $client_ident <S: ::ipc::IpcSocket> {
|
||||
socket: ::std::cell::RefCell<S>,
|
||||
phantom: ::std::marker::PhantomData<S>,
|
||||
});
|
||||
|
||||
push(Annotatable::Item(client_struct_item.expect(&format!("could not generate client struct for {:?}", client_ident.name))));
|
||||
}
|
||||
|
||||
/// pushes generated code for the client class (type declaration and method invocation implementations)
|
||||
fn push_client(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
item: &Item,
|
||||
dispatches: &[Dispatch],
|
||||
push: &mut FnMut(Annotatable))
|
||||
{
|
||||
push_client_struct(cx, builder, item, push);
|
||||
push_client_implementation(cx, builder, dispatches, item, push);
|
||||
push_with_socket_client_implementation(cx, builder, item, push);
|
||||
.map(|dispatch| { index = index + 1; implement_dispatch_arm(cx, builder, index as u32, dispatch, buffer, replacements) }).collect()
|
||||
}
|
||||
|
||||
/// returns an expression with the body for single operation that is being sent to server
|
||||
@ -328,16 +317,19 @@ fn implement_client_method_body(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
index: u16,
|
||||
dispatch: &Dispatch,
|
||||
)
|
||||
-> P<ast::Expr>
|
||||
interface_map: &InterfaceMap,
|
||||
) -> P<ast::Expr>
|
||||
{
|
||||
let dispatch = &interface_map.dispatches[index as usize];
|
||||
let request = if dispatch.input_arg_names.len() > 0 {
|
||||
|
||||
let arg_name = dispatch.input_arg_names[0].as_str();
|
||||
let static_ty = &dispatch.input_arg_tys[0];
|
||||
let arg_ty = builder
|
||||
.ty().ref_()
|
||||
.lifetime("'a")
|
||||
.ty().build(dispatch.input_arg_tys[0].clone());
|
||||
.ty()
|
||||
.build(typegen::argument_replacement(builder, &interface_map.replacements, static_ty).unwrap_or(static_ty.clone()));
|
||||
|
||||
let mut tree = builder.item()
|
||||
.attr().word("derive(Serialize)")
|
||||
@ -345,15 +337,19 @@ fn implement_client_method_body(
|
||||
.generics()
|
||||
.lifetime_name("'a")
|
||||
.build()
|
||||
.field(arg_name).ty().build(arg_ty);
|
||||
.field(arg_name).ty()
|
||||
.build(arg_ty);
|
||||
|
||||
for arg_idx in 1..dispatch.input_arg_names.len() {
|
||||
let arg_name = dispatch.input_arg_names[arg_idx].as_str();
|
||||
let static_ty = &dispatch.input_arg_tys[arg_idx];
|
||||
let arg_ty = builder
|
||||
.ty().ref_()
|
||||
.lifetime("'a")
|
||||
.ty().build(dispatch.input_arg_tys[arg_idx].clone());
|
||||
.ty()
|
||||
.build(typegen::argument_replacement(builder, &interface_map.replacements, static_ty).unwrap_or(static_ty.clone()));
|
||||
tree = tree.field(arg_name).ty().build(arg_ty);
|
||||
|
||||
}
|
||||
let mut request_serialization_statements = Vec::new();
|
||||
|
||||
@ -378,11 +374,28 @@ fn implement_client_method_body(
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("Request"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Brace)));
|
||||
|
||||
for arg in dispatch.input_arg_names.iter() {
|
||||
for (idx, arg) in dispatch.input_arg_names.iter().enumerate() {
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of(arg.as_str()), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Colon));
|
||||
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::BinOp(::syntax::parse::token::And)));
|
||||
|
||||
let arg_ty = &dispatch.input_arg_tys[idx];
|
||||
let replacement = typegen::argument_replacement(builder, &interface_map.replacements, arg_ty);
|
||||
if let Some(ref replacement_ty) = replacement {
|
||||
let replacor_ident = ::syntax::print::pprust::ty_to_string(replacement_ty);
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of(&replacor_ident), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("from"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
}
|
||||
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of(arg.as_str()), ::syntax::parse::token::Plain)));
|
||||
|
||||
if replacement.is_some() {
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
}
|
||||
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Comma));
|
||||
}
|
||||
|
||||
@ -434,11 +447,13 @@ fn implement_client_method(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
index: u16,
|
||||
dispatch: &Dispatch)
|
||||
interface_map: &InterfaceMap,
|
||||
)
|
||||
-> ast::ImplItem
|
||||
{
|
||||
let dispatch = &interface_map.dispatches[index as usize];
|
||||
let method_name = builder.id(dispatch.function_name.as_str());
|
||||
let body = implement_client_method_body(cx, builder, index, dispatch);
|
||||
let body = implement_client_method_body(cx, builder, index, interface_map);
|
||||
|
||||
let ext_cx = &*cx;
|
||||
// expanded version of this
|
||||
@ -485,18 +500,75 @@ fn implement_client_method(
|
||||
signature.unwrap()
|
||||
}
|
||||
|
||||
fn client_generics(builder: &aster::AstBuilder, interface_map: &InterfaceMap) -> Generics {
|
||||
let ty_param = aster::ty_param::TyParamBuilder::new(
|
||||
builder.id("S")).trait_bound(
|
||||
builder.path().global().ids(&["ipc", "IpcSocket"]).build()
|
||||
).build().build();
|
||||
|
||||
builder.from_generics(interface_map.generics.clone())
|
||||
.with_ty_param(ty_param)
|
||||
.build()
|
||||
}
|
||||
|
||||
fn client_qualified_ident(builder: &aster::AstBuilder, interface_map: &InterfaceMap) -> P<Ty> {
|
||||
let generics = client_generics(builder, interface_map);
|
||||
aster::ty::TyBuilder::new().path().segment(interface_map.ident_map.client_ident(builder))
|
||||
.with_generics(generics).build()
|
||||
.build()
|
||||
}
|
||||
|
||||
fn client_phantom_ident(builder: &aster::AstBuilder, interface_map: &InterfaceMap) -> P<Ty> {
|
||||
let generics = client_generics(builder, interface_map);
|
||||
aster::ty::TyBuilder::new().phantom_data()
|
||||
.tuple().with_tys(generics.ty_params.iter().map(|x| aster::ty::TyBuilder::new().id(x.ident)))
|
||||
.build()
|
||||
}
|
||||
|
||||
/// generates client type for specified server type
|
||||
/// for say `Service` it generates `ServiceClient`
|
||||
fn push_client_struct(cx: &ExtCtxt, builder: &aster::AstBuilder, interface_map: &InterfaceMap, push: &mut FnMut(Annotatable)) {
|
||||
let generics = client_generics(builder, interface_map);
|
||||
let client_short_ident = interface_map.ident_map.client_ident(builder);
|
||||
let phantom = client_phantom_ident(builder, interface_map);
|
||||
|
||||
let client_struct_item = quote_item!(cx,
|
||||
pub struct $client_short_ident $generics {
|
||||
socket: ::std::cell::RefCell<S>,
|
||||
phantom: $phantom,
|
||||
});
|
||||
|
||||
push(Annotatable::Item(client_struct_item.expect(&format!("could not generate client struct for {:?}", client_short_ident.name))));
|
||||
}
|
||||
|
||||
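For orientation, a hedged sketch of what this emits for a generic service such as `DB<K>` (the shape is inferred from the builders above and from the `DBClient::<u64, _>` call in the nested-service test later in this diff; bounds on `K` elided):

```rust
// Illustrative expansion only, not literal codegen output.
pub struct DBClient<K, S: ::ipc::IpcSocket> {
    socket: ::std::cell::RefCell<S>,
    phantom: ::std::marker::PhantomData<(K, S)>,
}
```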
/// pushes generated code for the client class (type declaration and method invocation implementations)
|
||||
fn push_client(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
interface_map: &InterfaceMap,
|
||||
push: &mut FnMut(Annotatable),
|
||||
) {
|
||||
push_client_struct(cx, builder, interface_map, push);
|
||||
push_client_implementation(cx, builder, interface_map, push);
|
||||
push_with_socket_client_implementation(cx, builder, interface_map, push);
|
||||
}
|
||||
|
||||
|
||||
fn push_with_socket_client_implementation(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
item: &Item,
|
||||
interface_map: &InterfaceMap,
|
||||
push: &mut FnMut(Annotatable))
|
||||
{
|
||||
let (_, client_ident) = get_item_idents(builder, item);
|
||||
let generics = client_generics(builder, interface_map);
|
||||
let client_ident = client_qualified_ident(builder, interface_map);
|
||||
let where_clause = &generics.where_clause;
|
||||
let client_short_ident = interface_map.ident_map.client_ident(builder);
|
||||
|
||||
let implement = quote_item!(cx,
|
||||
impl<S> ::ipc::WithSocket<S> for $client_ident<S> where S: ::ipc::IpcSocket {
|
||||
fn init(socket: S) -> $client_ident<S> {
|
||||
$client_ident {
|
||||
impl $generics ::ipc::WithSocket<S> for $client_ident $where_clause {
|
||||
fn init(socket: S) -> $client_ident {
|
||||
$client_short_ident {
|
||||
socket: ::std::cell::RefCell::new(socket),
|
||||
phantom: ::std::marker::PhantomData,
|
||||
}
|
||||
@ -509,19 +581,22 @@ fn push_with_socket_client_implementation(
|
||||
fn push_client_implementation(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
dispatches: &[Dispatch],
|
||||
item: &Item,
|
||||
push: &mut FnMut(Annotatable))
|
||||
{
|
||||
let (item_ident, client_ident) = get_item_idents(builder, item);
|
||||
interface_map: &InterfaceMap,
|
||||
push: &mut FnMut(Annotatable),
|
||||
) {
|
||||
let item_ident = interface_map.ident_map.qualified_ident(builder);
|
||||
|
||||
let mut index = -1i32;
|
||||
let items = dispatches.iter()
|
||||
.map(|dispatch| { index = index + 1; P(implement_client_method(cx, builder, index as u16, dispatch)) })
|
||||
let items = interface_map.dispatches.iter()
|
||||
.map(|_| { index = index + 1; P(implement_client_method(cx, builder, index as u16, interface_map)) })
|
||||
.collect::<Vec<P<ast::ImplItem>>>();
|
||||
|
||||
let generics = client_generics(builder, interface_map);
|
||||
let client_ident = client_qualified_ident(builder, interface_map);
|
||||
let where_clause = &generics.where_clause;
|
||||
|
||||
let implement = quote_item!(cx,
|
||||
impl<S> $client_ident<S> where S: ::ipc::IpcSocket {
|
||||
impl $generics $client_ident $where_clause {
|
||||
pub fn handshake(&self) -> Result<(), ::ipc::Error> {
|
||||
let payload = BinHandshake {
|
||||
protocol_version: $item_ident::protocol_version().to_string(),
|
||||
@ -589,22 +664,52 @@ fn implement_handshake_arm(
|
||||
)
|
||||
}
|
||||
|
||||
fn get_item_idents(builder: &aster::AstBuilder, item: &Item) -> (::syntax::ast::Ident, ::syntax::ast::Ident) {
|
||||
let ty = match item.node {
|
||||
ast::ItemKind::Impl(_, _, _, _, ref ty, _) => ty.clone(),
|
||||
_ => { builder.ty().id("") }
|
||||
fn collect_tys(items: &[&MethodSig]) -> Vec<P<Ty>> {
|
||||
let mut result = Vec::new();
|
||||
for signature in items {
|
||||
result.extend(signature.decl.inputs.iter().map(|input_arg| input_arg.ty.clone()));
|
||||
if let FunctionRetTy::Ty(ref ty) = signature.decl.output {
|
||||
result.push(ty.clone())
|
||||
};
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
let (item_ident, client_ident) = match ty.node {
|
||||
::syntax::ast::TyKind::Path(_, ref path) => {
|
||||
(
|
||||
builder.id(format!("{}", path.segments[0].identifier)),
|
||||
builder.id(format!("{}Client", path.segments[0].identifier))
|
||||
)
|
||||
},
|
||||
struct InterfaceMap {
|
||||
pub original_item: Item,
|
||||
pub item: P<ast::Item>,
|
||||
pub dispatches: Vec<Dispatch>,
|
||||
pub replacements: HashMap<String, P<Ty>>,
|
||||
pub generics: Generics,
|
||||
pub impl_trait: Option<TraitRef>,
|
||||
pub ident_map: IdentMap,
|
||||
}
|
||||
|
||||
struct IdentMap {
|
||||
original_path: ast::Path,
|
||||
}
|
||||
|
||||
impl IdentMap {
|
||||
fn ident(&self, builder: &aster::AstBuilder) -> Ident {
|
||||
builder.id(format!("{}", ::syntax::print::pprust::path_to_string(&self.original_path)))
|
||||
}
|
||||
|
||||
fn client_ident(&self, builder: &aster::AstBuilder) -> Ident {
|
||||
builder.id(format!("{}Client", self.original_path.segments[0].identifier))
|
||||
}
|
||||
|
||||
fn qualified_ident(&self, builder: &aster::AstBuilder) -> Ident {
|
||||
builder.id(format!("{}", ::syntax::print::pprust::path_to_string(&self.original_path).replace("<", "::<")))
|
||||
}
|
||||
}
|
||||
|
||||
fn ty_ident_map(original_ty: &P<Ty>) -> IdentMap {
|
||||
let original_path = match original_ty.node {
|
||||
::syntax::ast::TyKind::Path(_, ref path) => path.clone(),
|
||||
_ => { panic!("incompatible implementation"); }
|
||||
};
|
||||
(item_ident, client_ident)
|
||||
let ident_map = IdentMap { original_path: original_path };
|
||||
ident_map
|
||||
}
|
||||
|
||||
/// implements `IpcInterface<C>` for the given class `C`
|
||||
@ -613,9 +718,9 @@ fn implement_interface(
|
||||
builder: &aster::AstBuilder,
|
||||
item: &Item,
|
||||
push: &mut FnMut(Annotatable),
|
||||
) -> Result<(P<ast::Item>, Vec<Dispatch>), Error> {
|
||||
let (generics, impl_items) = match item.node {
|
||||
ast::ItemKind::Impl(_, _, ref generics, _, _, ref impl_items) => (generics, impl_items),
|
||||
) -> Result<InterfaceMap, Error> {
|
||||
let (generics, impl_trait, original_ty, impl_items) = match item.node {
|
||||
ast::ItemKind::Impl(_, _, ref generics, ref impl_trait, ref ty, ref impl_items) => (generics, impl_trait, ty, impl_items),
|
||||
_ => {
|
||||
cx.span_err(
|
||||
item.span,
|
||||
@ -623,30 +728,35 @@ fn implement_interface(
|
||||
return Err(Error);
|
||||
}
|
||||
};
|
||||
|
||||
let impl_generics = builder.from_generics(generics.clone())
|
||||
.add_ty_param_bound(
|
||||
builder.path().global().ids(&["ethcore_ipc"]).build()
|
||||
)
|
||||
.build();
|
||||
|
||||
let impl_generics = builder.from_generics(generics.clone()).build();
|
||||
let where_clause = &impl_generics.where_clause;
|
||||
|
||||
let (ty, _) = get_item_idents(builder, item);
|
||||
|
||||
let mut dispatch_table = Vec::new();
|
||||
let mut method_signatures = Vec::new();
|
||||
for impl_item in impl_items {
|
||||
if let ImplItemKind::Method(ref signature, _) = impl_item.node {
|
||||
dispatch_table.push(push_invoke_signature_aster(builder, &impl_item, signature, push));
|
||||
method_signatures.push((impl_item, signature))
|
||||
}
|
||||
}
|
||||
|
||||
let dispatch_arms = implement_dispatch_arms(cx, builder, &dispatch_table, false);
|
||||
let dispatch_arms_buffered = implement_dispatch_arms(cx, builder, &dispatch_table, true);
|
||||
let all_tys = collect_tys(
|
||||
&method_signatures
|
||||
.iter()
|
||||
.map(|&(_, signature)| signature)
|
||||
.collect::<Vec<&MethodSig>>());
|
||||
|
||||
let replacements = typegen::match_unknown_tys(cx, builder, &all_tys, push);
|
||||
|
||||
let dispatch_table = method_signatures.iter().map(|&(impl_item, signature)|
|
||||
push_invoke_signature_aster(builder, impl_item, signature, &replacements, push))
|
||||
.collect::<Vec<Dispatch>>();
|
||||
|
||||
let dispatch_arms = implement_dispatch_arms(cx, builder, &dispatch_table, false, &replacements);
|
||||
let dispatch_arms_buffered = implement_dispatch_arms(cx, builder, &dispatch_table, true, &replacements);
|
||||
|
||||
let (handshake_arm, handshake_arm_buf) = implement_handshake_arm(cx);
|
||||
|
||||
Ok((quote_item!(cx,
|
||||
let ty = ty_ident_map(&original_ty).ident(builder);
|
||||
let ipc_item = quote_item!(cx,
|
||||
impl $impl_generics ::ipc::IpcInterface<$ty> for $ty $where_clause {
|
||||
fn dispatch<R>(&self, r: &mut R) -> Vec<u8>
|
||||
where R: ::std::io::Read
|
||||
@ -657,6 +767,7 @@ fn implement_interface(
|
||||
Err(e) => { panic!("ipc read error: {:?}, aborting", e); }
|
||||
_ => { }
|
||||
}
|
||||
|
||||
// method_num is a 16-bit little-endian unsigned number
|
||||
match method_num[1] as u16 + (method_num[0] as u16)*256 {
|
||||
// handshake
|
||||
@ -676,5 +787,15 @@ fn implement_interface(
|
||||
}
|
||||
}
|
||||
}
|
||||
).unwrap(), dispatch_table))
|
||||
).unwrap();
|
||||
|
||||
Ok(InterfaceMap {
|
||||
ident_map: ty_ident_map(&original_ty),
|
||||
original_item: item.clone(),
|
||||
item: ipc_item,
|
||||
dispatches: dispatch_table,
|
||||
replacements: replacements,
|
||||
generics: generics.clone(),
|
||||
impl_trait: impl_trait.clone(),
|
||||
})
|
||||
}
|
||||
|
@ -15,3 +15,4 @@
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
mod codegen;
|
||||
pub mod typegen;
|
||||
|

ipc/codegen/src/typegen.rs (new file, 237 lines)
@ -0,0 +1,237 @@
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
use aster;
|
||||
|
||||
use syntax::ast::{
|
||||
Ty,
|
||||
TyKind,
|
||||
Path,
|
||||
DUMMY_NODE_ID,
|
||||
};
|
||||
|
||||
use syntax::ast;
|
||||
use syntax::ext::base::{Annotatable, ExtCtxt};
|
||||
use syntax::ext::build::AstBuilder;
|
||||
use syntax::ptr::P;
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::ops::Deref;
|
||||
|
||||
fn is_new_entry(path: &Path) -> Option<String> {
|
||||
let known = {
|
||||
if path.segments.len() > 1 {
|
||||
false
|
||||
} else {
|
||||
let ident = format!("{}", path.segments[0].identifier.name.as_str());
|
||||
ident == "u8" ||
|
||||
ident == "i8" ||
|
||||
ident == "u16" ||
|
||||
ident == "i16" ||
|
||||
ident == "u32" ||
|
||||
ident == "u64" ||
|
||||
ident == "usize" ||
|
||||
ident == "i32" ||
|
||||
ident == "i64" ||
|
||||
ident == "String" ||
|
||||
ident == "bool"
|
||||
}
|
||||
};
|
||||
|
||||
if known { None }
|
||||
else { Some(::syntax::print::pprust::path_to_string(path)) }
|
||||
}
|
||||
|
||||
pub fn argument_replacement(
|
||||
builder: &aster::AstBuilder,
|
||||
replacements: &HashMap<String, P<Ty>>,
|
||||
ty: &P<Ty>,
|
||||
) -> Option<P<Ty>> {
|
||||
match ty.node {
|
||||
TyKind::Vec(ref nested_ty) => {
|
||||
argument_replacement(builder, replacements, nested_ty).and_then(|replaced_with| {
|
||||
let mut inplace_ty = nested_ty.deref().clone();
|
||||
inplace_ty.node = TyKind::Vec(replaced_with);
|
||||
inplace_ty.id = DUMMY_NODE_ID;
|
||||
Some(P(inplace_ty))
|
||||
})
|
||||
},
|
||||
TyKind::FixedLengthVec(ref nested_ty, ref len_expr) => {
|
||||
argument_replacement(builder, replacements, nested_ty).and_then(|replaced_with| {
|
||||
let mut inplace_ty = nested_ty.deref().clone();
|
||||
inplace_ty.node = TyKind::FixedLengthVec(replaced_with, len_expr.clone());
|
||||
inplace_ty.id = DUMMY_NODE_ID;
|
||||
Some(P(inplace_ty))
|
||||
})
|
||||
},
|
||||
TyKind::Path(_, ref path) => {
|
||||
if path.segments.len() > 0 && path.segments[0].identifier.name.as_str() == "Option" ||
|
||||
path.segments[0].identifier.name.as_str() == "Result" {
|
||||
|
||||
let nested_ty = &path.segments[0].parameters.types()[0];
|
||||
argument_replacement(builder, replacements, nested_ty).and_then(|replaced_with| {
|
||||
let mut inplace_path = path.clone();
|
||||
match inplace_path.segments[0].parameters {
|
||||
ast::PathParameters::AngleBracketed(ref mut data) => {
|
||||
data.types = data.types.map(|_| replaced_with.clone());
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
let mut inplace_ty = nested_ty.deref().deref().clone();
|
||||
inplace_ty.node = TyKind::Path(None, inplace_path);
|
||||
inplace_ty.id = DUMMY_NODE_ID;
|
||||
Some(P(inplace_ty))
|
||||
})
|
||||
}
|
||||
else {
|
||||
replacements.get(&::syntax::print::pprust::path_to_string(path)).and_then(|replaced_with| {
|
||||
Some(replaced_with.clone())
|
||||
})
|
||||
}
|
||||
}
|
||||
_ => { None }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_bin_box(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
ty: &Ty,
|
||||
bbox_name: &str,
|
||||
push: &mut FnMut(Annotatable),
|
||||
) {
|
||||
let ident = builder.id(bbox_name);
|
||||
let bin_box_struct = quote_item!(cx,
|
||||
struct $ident ($ty);
|
||||
).unwrap();
|
||||
push(Annotatable::Item(bin_box_struct));
|
||||
push(Annotatable::Item(quote_item!(cx,
|
||||
impl From<$ty> for $ident {
|
||||
fn from(val: $ty) -> $ident {
|
||||
$ident(val)
|
||||
}
|
||||
}).unwrap()));
|
||||
|
||||
push(Annotatable::Item(quote_item!(cx,
|
||||
impl Into<$ty> for $ident {
|
||||
fn into(self) -> $ty {
|
||||
let $ident(val) = self;
|
||||
val
|
||||
}
|
||||
}).unwrap()));
|
||||
|
||||
let serialize_impl = quote_item!(cx,
|
||||
impl ::serde::ser::Serialize for $ident {
|
||||
fn serialize<__S>(&self, _serializer: &mut __S) -> ::std::result::Result<(), __S::Error>
|
||||
where __S: ::serde::ser::Serializer
|
||||
{
|
||||
let &$ident(ref val) = self;
|
||||
_serializer.serialize_bytes(val.as_slice())
|
||||
}
|
||||
}).unwrap();
|
||||
|
||||
let ident_expr = builder.id(::syntax::print::pprust::ty_to_string(ty));
|
||||
|
||||
let deserialize_impl = quote_item!(cx,
|
||||
impl ::serde::de::Deserialize for $ident {
|
||||
fn deserialize<__D>(deserializer: &mut __D) -> ::std::result::Result<$ident, __D::Error>
|
||||
where __D: ::serde::de::Deserializer
|
||||
{
|
||||
struct __Visitor<__D: ::serde::de::Deserializer>(::std::marker::PhantomData<__D>);
|
||||
|
||||
impl <__D: ::serde::de::Deserializer> ::serde::de::Visitor for __Visitor<__D> {
|
||||
type Value = $ident;
|
||||
#[inline]
|
||||
fn visit_seq<__V>(&mut self, mut visitor: __V) -> ::std::result::Result<$ident, __V::Error>
|
||||
where __V: ::serde::de::SeqVisitor
|
||||
{
|
||||
let raw_bytes: Vec<u8> = try!(visitor.visit()).unwrap_or_else(|| Vec::new());
|
||||
let inner = $ident_expr ::from_bytes(&raw_bytes).unwrap();
|
||||
Ok($ident (inner))
|
||||
}
|
||||
|
||||
}
|
||||
deserializer.deserialize_bytes(__Visitor::<__D>(::std::marker::PhantomData))
|
||||
}
|
||||
|
||||
}).unwrap();
|
||||
|
||||
push(Annotatable::Item(serialize_impl));
|
||||
push(Annotatable::Item(deserialize_impl));
|
||||
}
|
||||
|
||||
pub fn match_unknown_tys(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
tys: &[P<Ty>],
|
||||
push: &mut FnMut(Annotatable),
|
||||
) -> HashMap<String, P<Ty>>
|
||||
{
|
||||
let mut hash_map = HashMap::new();
|
||||
let mut fringe = Vec::new();
|
||||
fringe.extend(tys);
|
||||
let mut stop_list = HashSet::new();
|
||||
let mut index = 0;
|
||||
|
||||
loop {
|
||||
if fringe.len() == 0 { break; }
|
||||
let drained = fringe.drain(..1).collect::<Vec<&P<Ty>>>();
|
||||
let ty = drained[0];
|
||||
stop_list.insert(ty);
|
||||
|
||||
match ty.node {
|
||||
TyKind::Vec(ref nested_ty) => {
|
||||
if !stop_list.contains(nested_ty) {
|
||||
fringe.push(nested_ty);
|
||||
}
|
||||
},
|
||||
TyKind::FixedLengthVec(ref nested_ty, _) => {
|
||||
if !stop_list.contains(nested_ty) {
|
||||
fringe.push(nested_ty);
|
||||
}
|
||||
},
|
||||
TyKind::Path(_, ref path) => {
|
||||
if path.segments.len() > 0 && {
|
||||
let first_segment = path.segments[0].identifier.name.as_str();
|
||||
first_segment == "Option" || first_segment == "Result" || first_segment == "Vec"
|
||||
}
|
||||
{
|
||||
let extra_type = &path.segments[0].parameters.types()[0];
|
||||
if !stop_list.contains(extra_type) {
|
||||
fringe.push(extra_type);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
match is_new_entry(path) {
|
||||
Some(old_path) => {
|
||||
if hash_map.get(&old_path).is_some() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let bin_box_name = format!("BinBox{}", index);
|
||||
push_bin_box(cx, builder, &ty, &bin_box_name, push);
|
||||
hash_map.insert(old_path, builder.ty().id(&bin_box_name));
|
||||
index = index + 1;
|
||||
},
|
||||
None => {}
|
||||
}
|
||||
},
|
||||
_ => { }
|
||||
}
|
||||
}
|
||||
|
||||
hash_map
|
||||
}
|
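Taken together, `match_unknown_tys` assigns every non-primitive leaf type a `BinBox{N}` name and `push_bin_box` emits the conversions plus the byte-oriented serde impls for it. A hedged sketch of the generated shape for one hypothetical type `H256` (serde impls omitted), mirroring the quote_item! templates above:

```rust
struct BinBox0(H256);

impl From<H256> for BinBox0 {
    fn from(val: H256) -> BinBox0 { BinBox0(val) }
}

impl Into<H256> for BinBox0 {
    fn into(self) -> H256 {
        let BinBox0(val) = self;
        val
    }
}
```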
@ -8,5 +8,5 @@ license = "GPL-3.0"
|
||||
|
||||
[dependencies]
|
||||
"ethcore-ipc" = { path = "../rpc" }
|
||||
nanomsg = "0.5.0"
|
||||
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
|
||||
log = "0.3"
|
||||
|
@ -54,7 +54,7 @@ impl<S> Deref for GuardedSocket<S> where S: WithSocket<Socket> {
|
||||
/// Spawns client <`S`> over specified address
|
||||
/// creates socket and connects endpoint to it
|
||||
/// for duplex (paired) connections with the service
|
||||
pub fn init_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
|
||||
pub fn init_duplex_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
|
||||
let mut socket = try!(Socket::new(Protocol::Pair).map_err(|e| {
|
||||
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
|
||||
SocketError::DuplexLink
|
||||
@ -71,16 +71,38 @@ pub fn init_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError
|
||||
})
|
||||
}
|
||||
|
||||
/// Spawns client <`S`> over specified address
|
||||
/// creates socket and connects endpoint to it
|
||||
/// for request-reply connections to the service
|
||||
pub fn init_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
|
||||
let mut socket = try!(Socket::new(Protocol::Req).map_err(|e| {
|
||||
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
|
||||
SocketError::RequestLink
|
||||
}));
|
||||
|
||||
let endpoint = try!(socket.connect(socket_addr).map_err(|e| {
|
||||
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e);
|
||||
SocketError::RequestLink
|
||||
}));
|
||||
|
||||
Ok(GuardedSocket {
|
||||
client: Arc::new(S::init(socket)),
|
||||
_endpoint: endpoint,
|
||||
})
|
||||
}
|
||||
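With the request/reply constructor added alongside the existing duplex one, wiring a generated client up looks roughly like this hedged sketch (client type and socket path are illustrative; `GuardedSocket` derefs to the client):

```rust
// `ServiceClient` stands in for any codegen-generated client implementing
// WithSocket<Socket>.
let service = init_client::<ServiceClient<Socket>>("ipc:///tmp/parity-example.ipc").unwrap();
service.handshake().unwrap();
```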
|
||||
/// Error occured while establising socket or endpoint
|
||||
#[derive(Debug)]
|
||||
pub enum SocketError {
|
||||
/// Error establising duplex (paired) socket and/or endpoint
|
||||
DuplexLink
|
||||
DuplexLink,
|
||||
/// Error establising duplex (paired) socket and/or endpoint
|
||||
RequestLink,
|
||||
}
|
||||
|
||||
impl<S> Worker<S> where S: IpcInterface<S> {
|
||||
/// New worker over specified `service`
|
||||
pub fn new(service: Arc<S>) -> Worker<S> {
|
||||
pub fn new(service: &Arc<S>) -> Worker<S> {
|
||||
Worker::<S> {
|
||||
service: service.clone(),
|
||||
sockets: Vec::new(),
|
||||
@ -103,7 +125,7 @@ impl<S> Worker<S> where S: IpcInterface<S> {
|
||||
if method_sign_len >= 2 {
|
||||
|
||||
// method_num
|
||||
let method_num = self.buf[1] as u16 * 256 + self.buf[0] as u16;
|
||||
let method_num = self.buf[0] as u16 * 256 + self.buf[1] as u16;
|
||||
// payload
|
||||
let payload = &self.buf[2..];
|
||||
|
||||
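The swap above fixes the byte order used when reading the method id off the socket; together with the test payloads later in this diff (e.g. `vec![0, 18, ...]` invoking method 18), the framing is two bytes of method id, high byte first, followed by the serialized payload. A small sketch of that decoding:

```rust
fn decode_method_id(buf: &[u8]) -> u16 {
    (buf[0] as u16) * 256 + (buf[1] as u16)
}

assert_eq!(decode_method_id(&[0, 18]), 18); // method 18, as in the codegen tests
```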
@ -155,6 +177,26 @@ impl<S> Worker<S> where S: IpcInterface<S> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Add generic socket for request-reply style communications
|
||||
/// with multiple clients
|
||||
pub fn add_reqrep(&mut self, addr: &str) -> Result<(), SocketError> {
|
||||
let mut socket = try!(Socket::new(Protocol::Rep).map_err(|e| {
|
||||
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
|
||||
SocketError::DuplexLink
|
||||
}));
|
||||
|
||||
let endpoint = try!(socket.bind(addr).map_err(|e| {
|
||||
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", addr, e);
|
||||
SocketError::DuplexLink
|
||||
}));
|
||||
|
||||
self.sockets.push((socket, endpoint));
|
||||
|
||||
self.rebuild_poll_request();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
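A hedged sketch of serving the new request/reply endpoint (service type and address are illustrative; note that `Worker::new` now borrows the `Arc`, per the change above):

```rust
let service = Arc::new(MyService::new()); // any type implementing IpcInterface
let mut worker = Worker::new(&service);
worker.add_reqrep("ipc:///tmp/parity-example-req.ipc").unwrap();
loop {
    worker.poll();
}
```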
#[cfg(test)]
|
||||
@ -206,13 +248,13 @@ mod service_tests {
|
||||
|
||||
#[test]
|
||||
fn can_create_worker() {
|
||||
let worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
let worker = Worker::<DummyService>::new(&Arc::new(DummyService::new()));
|
||||
assert_eq!(0, worker.sockets.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_add_duplex_socket_to_worker() {
|
||||
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
let mut worker = Worker::<DummyService>::new(&Arc::new(DummyService::new()));
|
||||
worker.add_duplex("ipc:///tmp/parity-test10.ipc").unwrap();
|
||||
assert_eq!(1, worker.sockets.len());
|
||||
}
|
||||
@ -220,7 +262,7 @@ mod service_tests {
|
||||
#[test]
|
||||
fn worker_can_poll_empty() {
|
||||
let service = Arc::new(DummyService::new());
|
||||
let mut worker = Worker::<DummyService>::new(service.clone());
|
||||
let mut worker = Worker::<DummyService>::new(&service);
|
||||
worker.add_duplex("ipc:///tmp/parity-test20.ipc").unwrap();
|
||||
worker.poll();
|
||||
assert_eq!(0, service.methods_stack.read().unwrap().len());
|
||||
@ -230,7 +272,7 @@ mod service_tests {
|
||||
fn worker_can_poll() {
|
||||
let url = "ipc:///tmp/parity-test30.ipc";
|
||||
|
||||
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
let mut worker = Worker::<DummyService>::new(&Arc::new(DummyService::new()));
|
||||
worker.add_duplex(url).unwrap();
|
||||
|
||||
let (_socket, _endpoint) = dummy_write(url, &vec![0, 0, 7, 7, 6, 6]);
|
||||
@ -245,7 +287,7 @@ mod service_tests {
|
||||
fn worker_can_poll_long() {
|
||||
let url = "ipc:///tmp/parity-test40.ipc";
|
||||
|
||||
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
let mut worker = Worker::<DummyService>::new(&Arc::new(DummyService::new()));
|
||||
worker.add_duplex(url).unwrap();
|
||||
|
||||
let message = [0u8; 1024*1024];
|
||||
|
@@ -9,4 +9,4 @@ license = "GPL-3.0"
[dependencies]
ethcore-devtools = { path = "../../devtools" }
semver = "0.2.0"
nanomsg = "0.5.0"
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }

@@ -13,9 +13,9 @@ bincode = "*"
serde = "0.7.0"
ethcore-devtools = { path = "../../devtools" }
semver = "0.2.0"
nanomsg = "0.5.0"
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
ethcore-ipc-nano = { path = "../nano" }

ethcore-util = { path = "../../util" }

[build-dependencies]
syntex = "*"

@@ -24,6 +24,24 @@ use std::path::Path;
pub fn main() {
	let out_dir = env::var_os("OUT_DIR").unwrap();

	// ipc pass
	{
		let src = Path::new("nested.rs.in");
		let dst = Path::new(&out_dir).join("nested_ipc.rs");
		let mut registry = syntex::Registry::new();
		codegen::register(&mut registry);
		registry.expand("", &src, &dst).unwrap();
	}

	// serde pass
	{
		let src = Path::new(&out_dir).join("nested_ipc.rs");
		let dst = Path::new(&out_dir).join("nested_cg.rs");
		let mut registry = syntex::Registry::new();
		serde_codegen::register(&mut registry);
		registry.expand("", &src, &dst).unwrap();
	}

	// ipc pass
	{
		let src = Path::new("service.rs.in");
@@ -41,4 +59,5 @@ pub fn main() {
		serde_codegen::register(&mut registry);
		registry.expand("", &src, &dst).unwrap();
	}

}

@ -18,6 +18,7 @@
|
||||
mod tests {
|
||||
|
||||
use super::super::service::*;
|
||||
use super::super::nested::DBClient;
|
||||
use ipc::*;
|
||||
use devtools::*;
|
||||
use semver::Version;
|
||||
@ -101,4 +102,45 @@ mod tests {
|
||||
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_use_custom_params() {
|
||||
let mut socket = TestSocket::new();
|
||||
socket.read_buffer = vec![1];
|
||||
let service_client = ServiceClient::init(socket);
|
||||
|
||||
let result = service_client.push_custom(CustomData { a: 3, b: 11});
|
||||
|
||||
assert_eq!(vec![
|
||||
// message num..
|
||||
0, 18,
|
||||
// payload length
|
||||
0, 0, 0, 0, 0, 0, 0, 16,
|
||||
// structure raw bytes (bigendians :( )
|
||||
3, 0, 0, 0, 0, 0, 0, 0,
|
||||
11, 0, 0, 0, 0, 0, 0, 0],
|
||||
service_client.socket().borrow().write_buffer.clone());
|
||||
assert_eq!(true, result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_invoke_generic_service() {
|
||||
let mut socket = TestSocket::new();
|
||||
socket.read_buffer = vec![0, 0, 0, 0];
|
||||
let db_client = DBClient::<u64, _>::init(socket);
|
||||
|
||||
let result = db_client.write(vec![0u8; 100]);
|
||||
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
#[test]
|
||||
fn can_handshake_generic_service() {
|
||||
let mut socket = TestSocket::new();
|
||||
socket.read_buffer = vec![1];
|
||||
let db_client = DBClient::<u64, _>::init(socket);
|
||||
|
||||
let result = db_client.handshake();
|
||||
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
}
|
||||
|
17
ipc/tests/nested.rs
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
include!(concat!(env!("OUT_DIR"), "/nested_cg.rs"));
|
44
ipc/tests/nested.rs.in
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::RwLock;
|
||||
use std::ops::*;
|
||||
use ipc::IpcConfig;
|
||||
|
||||
pub struct DB<L: Sized> {
|
||||
pub writes: RwLock<u64>,
|
||||
pub reads: RwLock<u64>,
|
||||
pub holdings: L,
|
||||
}
|
||||
|
||||
trait DBWriter {
|
||||
fn write(&self, data: Vec<u8>) -> Result<(), DBError>;
|
||||
}
|
||||
|
||||
impl<L: Sized> IpcConfig for DB<L> {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum DBError { Write, Read }
|
||||
|
||||
#[derive(Ipc)]
|
||||
impl<L: Sized> DBWriter for DB<L> {
|
||||
fn write(&self, data: Vec<u8>) -> Result<(), DBError> {
|
||||
let mut writes = self.writes.write().unwrap();
|
||||
*writes = *writes + data.len() as u64;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -32,14 +32,14 @@ mod tests {
|
||||
|
||||
|
||||
fn init_worker(addr: &str) -> nanoipc::Worker<Service> {
|
||||
let mut worker = nanoipc::Worker::<Service>::new(Arc::new(Service::new()));
|
||||
let mut worker = nanoipc::Worker::<Service>::new(&Arc::new(Service::new()));
|
||||
worker.add_duplex(addr).unwrap();
|
||||
worker
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_create_client() {
|
||||
let client = nanoipc::init_client::<ServiceClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
|
||||
let client = nanoipc::init_duplex_client::<ServiceClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
|
||||
assert!(client.is_ok());
|
||||
}
|
||||
|
||||
@ -60,7 +60,7 @@ mod tests {
|
||||
});
|
||||
|
||||
while !worker_is_ready.load(Ordering::Relaxed) { }
|
||||
let client = nanoipc::init_client::<ServiceClient<_>>(url).unwrap();
|
||||
let client = nanoipc::init_duplex_client::<ServiceClient<_>>(url).unwrap();
|
||||
|
||||
let hs = client.handshake();
|
||||
|
||||
@ -105,5 +105,4 @@ mod tests {
|
||||
|
||||
worker_should_exit.store(true, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -14,6 +14,8 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
extern crate bincode;
|
||||
extern crate ethcore_ipc as ipc;
|
||||
extern crate serde;
|
||||
@ -21,7 +23,9 @@ extern crate ethcore_devtools as devtools;
|
||||
extern crate semver;
|
||||
extern crate nanomsg;
|
||||
extern crate ethcore_ipc_nano as nanoipc;
|
||||
extern crate ethcore_util as util;
|
||||
|
||||
pub mod service;
|
||||
mod examples;
|
||||
mod over_nano;
|
||||
mod nested;
|
||||
|
@ -16,13 +16,42 @@
|
||||
|
||||
use std::sync::RwLock;
|
||||
use std::ops::*;
|
||||
use std::convert::*;
|
||||
use ipc::IpcConfig;
|
||||
use util::bytes::{FromRawBytes, BytesConvertable, FromBytesError};
|
||||
|
||||
pub struct Service {
|
||||
pub commits: RwLock<usize>,
|
||||
pub rollbacks: RwLock<usize>,
|
||||
}
|
||||
|
||||
pub struct CustomData {
|
||||
pub a: usize,
|
||||
pub b: usize,
|
||||
}
|
||||
|
||||
impl FromRawBytes for CustomData {
|
||||
fn from_bytes(bytes: &[u8]) -> Result<CustomData, FromBytesError> {
|
||||
Ok(CustomData {
|
||||
a: bytes[0] as usize * 256 + bytes[1] as usize,
|
||||
b: bytes[2] as usize * 256 + bytes[3] as usize
|
||||
})
|
||||
}
|
||||
}
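As a quick illustration of the decoding above (editor's note, not part of the change), each field is read as a big-endian u16 pair, so the four bytes [0, 3, 0, 11] decode to the same a and b values as the `CustomData { a: 3, b: 11 }` used in the test earlier in this diff:

#[test]
fn custom_data_from_bytes_sketch() {
	// Hypothetical check: `from_bytes` computes a = 0*256 + 3 = 3 and b = 0*256 + 11 = 11.
	let decoded = CustomData::from_bytes(&[0, 3, 0, 11]).unwrap();
	assert_eq!(3, decoded.a);
	assert_eq!(11, decoded.b);
}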
|
||||
|
||||
impl BytesConvertable for CustomData {
|
||||
fn bytes(&self) -> &[u8] {
|
||||
let ip: *const CustomData = self;
|
||||
let ptr: *const u8 = ip as *const _;
|
||||
unsafe {
|
||||
::std::slice::from_raw_parts(
|
||||
ptr,
|
||||
::std::mem::size_of::<CustomData>()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Ipc)]
|
||||
impl Service {
|
||||
fn commit(&self, f: u32) -> u32 {
|
||||
@ -36,6 +65,15 @@ impl Service {
|
||||
*lock = *lock + a_0 as usize - b as usize;
|
||||
(a_0 - b) as i32
|
||||
}
|
||||
pub fn push_custom(&self, data: CustomData) -> bool {
|
||||
let mut clock = self.commits.write().unwrap();
|
||||
let mut rlock = self.commits.write().unwrap();
|
||||
|
||||
*clock = data.a;
|
||||
*rlock = data.b;
|
||||
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl Service {
|
||||
|
@ -64,7 +64,7 @@ mod transaction_queue;
|
||||
pub use transaction_queue::{TransactionQueue, AccountDetails};
|
||||
pub use miner::{Miner};
|
||||
|
||||
use util::{H256, U256, Address, FixedHash, Bytes};
|
||||
use util::{H256, U256, Address, Bytes};
|
||||
use ethcore::client::{BlockChainClient};
|
||||
use ethcore::block::{ClosedBlock};
|
||||
use ethcore::error::{Error};
|
||||
@ -77,14 +77,29 @@ pub trait MinerService : Send + Sync {
|
||||
fn status(&self) -> MinerStatus;
|
||||
|
||||
/// Get the author that we will seal blocks as.
|
||||
fn author(&self) -> Address { Address::zero() }
|
||||
fn author(&self) -> Address;
|
||||
|
||||
/// Get the extra_data that we will seal blocks wuth.
|
||||
fn extra_data(&self) -> Bytes { vec![] }
|
||||
/// Set the author that we will seal blocks as.
|
||||
fn set_author(&self, author: Address);
|
||||
|
||||
/// Get the extra_data that we will seal blocks with.
|
||||
fn extra_data(&self) -> Bytes;
|
||||
|
||||
/// Set the extra_data that we will seal blocks with.
|
||||
fn set_extra_data(&self, extra_data: Bytes);
|
||||
|
||||
/// Get current minimal gas price for transactions accepted to queue.
|
||||
fn minimal_gas_price(&self) -> U256;
|
||||
|
||||
/// Set minimal gas price of transaction to be accepted for mining.
|
||||
fn set_minimal_gas_price(&self, min_gas_price: U256);
|
||||
|
||||
/// Get the gas limit we wish to target when sealing a new block.
|
||||
fn gas_floor_target(&self) -> U256;
|
||||
|
||||
/// Set the gas limit we wish to target when sealing a new block.
|
||||
fn set_gas_floor_target(&self, target: U256);
|
||||
|
||||
/// Imports transactions to transaction queue.
|
||||
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_account: T) -> Vec<Result<(), Error>>
|
||||
where T: Fn(&Address) -> AccountDetails;
|
||||
@ -117,8 +132,11 @@ pub trait MinerService : Send + Sync {
|
||||
/// Returns highest transaction nonce for given address.
|
||||
fn last_nonce(&self, address: &Address) -> Option<U256>;
|
||||
|
||||
/// Suggested gas price
|
||||
/// Suggested gas price.
|
||||
fn sensible_gas_price(&self) -> U256 { x!(20000000000u64) }
|
||||
|
||||
/// Suggested gas limit.
|
||||
fn sensible_gas_limit(&self) -> U256 { x!(21000) }
|
||||
}
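To make the shape of the reworked trait concrete, here is a small sketch (editor's illustration; the helper and its argument values are made up) of configuring any `MinerService` implementation through the setters that are now required rather than defaulted:

fn configure_miner<M: MinerService>(miner: &M, author: Address, extra: Bytes) {
	// These calls compile against the trait alone; both Miner and TestMinerService implement them below.
	miner.set_author(author);
	miner.set_extra_data(extra);
	miner.set_minimal_gas_price(U256::from(20_000_000u64));
	miner.set_gas_floor_target(U256::from(3_141_592u64));
}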
|
||||
|
||||
/// Mining status
|
||||
|
@ -69,26 +69,6 @@ impl Miner {
|
||||
})
|
||||
}
|
||||
|
||||
/// Set the author that we will seal blocks as.
|
||||
pub fn set_author(&self, author: Address) {
|
||||
*self.author.write().unwrap() = author;
|
||||
}
|
||||
|
||||
/// Set the extra_data that we will seal blocks with.
|
||||
pub fn set_extra_data(&self, extra_data: Bytes) {
|
||||
*self.extra_data.write().unwrap() = extra_data;
|
||||
}
|
||||
|
||||
/// Set the gas limit we wish to target when sealing a new block.
|
||||
pub fn set_gas_floor_target(&self, target: U256) {
|
||||
*self.gas_floor_target.write().unwrap() = target;
|
||||
}
|
||||
|
||||
/// Set minimal gas price of transaction to be accepted for mining.
|
||||
pub fn set_minimal_gas_price(&self, min_gas_price: U256) {
|
||||
self.transaction_queue.lock().unwrap().set_minimal_gas_price(min_gas_price);
|
||||
}
|
||||
|
||||
/// Prepares new block for sealing including top transactions from queue.
|
||||
#[cfg_attr(feature="dev", allow(match_same_arms))]
|
||||
fn prepare_sealing(&self, chain: &BlockChainClient) {
|
||||
@ -153,13 +133,13 @@ impl Miner {
|
||||
}
|
||||
};
|
||||
let mut queue = self.transaction_queue.lock().unwrap();
|
||||
queue.remove_all(
|
||||
&invalid_transactions.into_iter().collect::<Vec<H256>>(),
|
||||
|a: &Address| AccountDetails {
|
||||
let fetch_account = |a: &Address| AccountDetails {
|
||||
nonce: chain.nonce(a),
|
||||
balance: chain.balance(a),
|
||||
};
|
||||
for hash in invalid_transactions.into_iter() {
|
||||
queue.remove_invalid(&hash, &fetch_account);
|
||||
}
|
||||
);
|
||||
if let Some(block) = b {
|
||||
if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
|
||||
trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
|
||||
@ -195,11 +175,36 @@ impl MinerService for Miner {
|
||||
}
|
||||
}
|
||||
|
||||
fn set_author(&self, author: Address) {
|
||||
*self.author.write().unwrap() = author;
|
||||
}
|
||||
|
||||
fn set_extra_data(&self, extra_data: Bytes) {
|
||||
*self.extra_data.write().unwrap() = extra_data;
|
||||
}
|
||||
|
||||
/// Set the gas limit we wish to target when sealing a new block.
|
||||
fn set_gas_floor_target(&self, target: U256) {
|
||||
*self.gas_floor_target.write().unwrap() = target;
|
||||
}
|
||||
|
||||
fn set_minimal_gas_price(&self, min_gas_price: U256) {
|
||||
self.transaction_queue.lock().unwrap().set_minimal_gas_price(min_gas_price);
|
||||
}
|
||||
|
||||
fn minimal_gas_price(&self) -> U256 {
|
||||
*self.transaction_queue.lock().unwrap().minimal_gas_price()
|
||||
}
|
||||
|
||||
fn sensible_gas_price(&self) -> U256 {
|
||||
// 10% above our minimum.
|
||||
*self.transaction_queue.lock().unwrap().minimal_gas_price() * x!(110) / x!(100)
|
||||
}
|
||||
|
||||
fn sensible_gas_limit(&self) -> U256 {
|
||||
*self.gas_floor_target.read().unwrap() / x!(5)
|
||||
}
|
||||
|
||||
/// Get the author that we will seal blocks as.
|
||||
fn author(&self) -> Address {
|
||||
*self.author.read().unwrap()
|
||||
@ -294,7 +299,7 @@ impl MinerService for Miner {
|
||||
}
|
||||
}
|
||||
|
||||
fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
|
||||
fn chain_new_blocks(&self, chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
|
||||
fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
|
||||
let block = chain
|
||||
.block(BlockId::Hash(*hash))
|
||||
@ -304,6 +309,11 @@ impl MinerService for Miner {
|
||||
block.transactions()
|
||||
}
|
||||
|
||||
// 1. We ignore blocks that were `imported` (because it means that they are not in canon-chain, and transactions
// should still be available in the queue).
// 2. We ignore blocks that are `invalid` because they don't carry any meaning in terms of the transactions that
// are in those blocks.
|
||||
|
||||
// First update gas limit in transaction queue
|
||||
self.update_gas_limit(chain);
|
||||
|
||||
@ -325,29 +335,23 @@ impl MinerService for Miner {
|
||||
});
|
||||
}
|
||||
|
||||
// ...and after that remove old ones
|
||||
// ...and at the end remove old ones
|
||||
{
|
||||
let in_chain = {
|
||||
let mut in_chain = HashSet::new();
|
||||
in_chain.extend(imported);
|
||||
in_chain.extend(enacted);
|
||||
in_chain.extend(invalid);
|
||||
in_chain
|
||||
.into_iter()
|
||||
.collect::<Vec<H256>>()
|
||||
};
|
||||
|
||||
let in_chain = in_chain
|
||||
let in_chain = enacted
|
||||
.par_iter()
|
||||
.map(|h: &H256| fetch_transactions(chain, h));
|
||||
|
||||
in_chain.for_each(|txs| {
|
||||
let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
|
||||
in_chain.for_each(|mut txs| {
|
||||
let mut transaction_queue = self.transaction_queue.lock().unwrap();
|
||||
transaction_queue.remove_all(&hashes, |a| AccountDetails {
|
||||
nonce: chain.nonce(a),
|
||||
balance: chain.balance(a)
|
||||
});
|
||||
|
||||
let to_remove = txs.drain(..)
|
||||
.map(|tx| {
|
||||
tx.sender().expect("Transaction is in block, so sender has to be defined.")
|
||||
})
|
||||
.collect::<HashSet<Address>>();
|
||||
for sender in to_remove.into_iter() {
|
||||
transaction_queue.remove_all(sender, chain.nonce(&sender));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -65,8 +65,8 @@
|
||||
//! assert_eq!(top[1], st2);
|
||||
//!
|
||||
//! // And when transaction is removed (but nonce haven't changed)
|
||||
//! // it will move invalid transactions to future
|
||||
//! txq.remove(&st1.hash(), &default_nonce);
|
||||
//! // it will move subsequent transactions to future
|
||||
//! txq.remove_invalid(&st1.hash(), &default_nonce);
|
||||
//! assert_eq!(txq.status().pending, 0);
|
||||
//! assert_eq!(txq.status().future, 1);
|
||||
//! assert_eq!(txq.top_transactions().len(), 0);
|
||||
@ -76,11 +76,13 @@
|
||||
//! # Maintaining valid state
|
||||
//!
|
||||
//! 1. Whenever a transaction is imported to the queue, all other transactions from this sender are revalidated in current. It means that they are moved to future and back again (height recalculation & gap filling).
|
||||
//! 2. Whenever transaction is removed:
|
||||
//! 2. Whenever invalid transaction is removed:
|
||||
//! - When it's removed from `future` - all `future` transactions heights are recalculated and then
|
||||
//! we check if the transactions should go to `current` (comparing state nonce)
|
||||
//! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated.
|
||||
//!
|
||||
//! 3. `remove_all` is used to inform the queue about client (state) nonce changes.
|
||||
//! - It removes all transactions (either from `current` or `future`) with nonce < client nonce
|
||||
//! - It moves matching `future` transactions to `current`
|
||||
|
||||
use std::default::Default;
|
||||
use std::cmp::{Ordering};
|
||||
@ -398,22 +400,28 @@ impl TransactionQueue {
|
||||
self.import_tx(vtx, client_account.nonce).map_err(Error::Transaction)
|
||||
}
|
||||
|
||||
/// Removes all transactions identified by hashes given in slice
|
||||
///
|
||||
/// If gap is introduced marks subsequent transactions as future
|
||||
pub fn remove_all<T>(&mut self, transaction_hashes: &[H256], fetch_account: T)
|
||||
where T: Fn(&Address) -> AccountDetails {
|
||||
for hash in transaction_hashes {
|
||||
self.remove(&hash, &fetch_account);
|
||||
}
|
||||
	/// Removes all transactions from particular sender up to (excluding) given client (state) nonce.
	/// Client (State) Nonce = next valid nonce for this sender.
	pub fn remove_all(&mut self, sender: Address, client_nonce: U256) {
		// We will either move transaction to future or remove it completely
		// so there will be no transactions from this sender in current
		self.last_nonces.remove(&sender);
		// First update height of transactions in future to avoid collisions
		self.update_future(&sender, client_nonce);
		// This should move all current transactions to future and remove old transactions
		self.move_all_to_future(&sender, client_nonce);
		// And now let's check if there is some batch of transactions in future
		// that should be placed in current. It should also update last_nonces.
		self.move_matching_future_to_current(sender, client_nonce, client_nonce);
	}
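A short sketch of how the two removal paths are now meant to be used (editor's illustration based on the tests below; the `fetch_account` closure stands in for a real state lookup):

	// After a block from `sender` is enacted, drop everything below the new state nonce
	// and promote matching `future` transactions in one call:
	txq.remove_all(sender, new_state_nonce);

	// A transaction that turned out to be unexecutable is removed individually; subsequent
	// transactions from the same sender move to `future` if this opens a nonce gap:
	txq.remove_invalid(&bad_tx_hash, &fetch_account);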
|
||||
|
||||
/// Removes transaction identified by hashes from queue.
|
||||
/// Removes invalid transaction identified by hash from queue.
|
||||
/// Assumption is that this transaction nonce is not related to client nonce,
|
||||
/// so transactions left in queue are processed according to client nonce.
|
||||
///
|
||||
/// If gap is introduced marks subsequent transactions as future
|
||||
pub fn remove<T>(&mut self, transaction_hash: &H256, fetch_account: &T)
|
||||
pub fn remove_invalid<T>(&mut self, transaction_hash: &H256, fetch_account: &T)
|
||||
where T: Fn(&Address) -> AccountDetails {
|
||||
|
||||
let transaction = self.by_hash.remove(transaction_hash);
|
||||
if transaction.is_none() {
|
||||
// We don't know this transaction
|
||||
@ -425,7 +433,6 @@ impl TransactionQueue {
|
||||
let nonce = transaction.nonce();
|
||||
let current_nonce = fetch_account(&sender).nonce;
|
||||
|
||||
|
||||
// Remove from future
|
||||
let order = self.future.drop(&sender, &nonce);
|
||||
if order.is_some() {
|
||||
@ -465,7 +472,7 @@ impl TransactionQueue {
|
||||
if k >= current_nonce {
|
||||
self.future.insert(*sender, k, order.update_height(k, current_nonce));
|
||||
} else {
|
||||
trace!(target: "miner", "Dropping old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
|
||||
trace!(target: "miner", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
|
||||
// Remove the transaction completely
|
||||
self.by_hash.remove(&order.hash);
|
||||
}
|
||||
@ -486,7 +493,7 @@ impl TransactionQueue {
|
||||
if k >= current_nonce {
|
||||
self.future.insert(*sender, k, order.update_height(k, current_nonce));
|
||||
} else {
|
||||
trace!(target: "miner", "Dropping old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
|
||||
trace!(target: "miner", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
|
||||
self.by_hash.remove(&order.hash);
|
||||
}
|
||||
}
|
||||
@ -665,9 +672,14 @@ mod test {
|
||||
new_unsigned_tx(U256::from(123)).sign(&keypair.secret())
|
||||
}
|
||||
|
||||
|
||||
fn default_nonce_val() -> U256 {
|
||||
U256::from(123)
|
||||
}
|
||||
|
||||
fn default_nonce(_address: &Address) -> AccountDetails {
|
||||
AccountDetails {
|
||||
nonce: U256::from(123),
|
||||
nonce: default_nonce_val(),
|
||||
balance: !U256::zero()
|
||||
}
|
||||
}
|
||||
@ -965,8 +977,7 @@ mod test {
|
||||
// given
|
||||
let prev_nonce = |a: &Address| AccountDetails{ nonce: default_nonce(a).nonce - U256::one(), balance:
|
||||
!U256::zero() };
|
||||
let next2_nonce = |a: &Address| AccountDetails{ nonce: default_nonce(a).nonce + U256::from(2), balance:
|
||||
!U256::zero() };
|
||||
let next2_nonce = default_nonce_val() + U256::from(3);
|
||||
|
||||
let mut txq = TransactionQueue::new();
|
||||
|
||||
@ -976,7 +987,7 @@ mod test {
|
||||
assert_eq!(txq.status().future, 2);
|
||||
|
||||
// when
|
||||
txq.remove(&tx.hash(), &next2_nonce);
|
||||
txq.remove_all(tx.sender().unwrap(), next2_nonce);
|
||||
// should remove both transactions since they are not valid
|
||||
|
||||
// then
|
||||
@ -1019,8 +1030,8 @@ mod test {
|
||||
assert_eq!(txq2.status().future, 1);
|
||||
|
||||
// when
|
||||
txq2.remove(&tx.hash(), &default_nonce);
|
||||
txq2.remove(&tx2.hash(), &default_nonce);
|
||||
txq2.remove_all(tx.sender().unwrap(), tx.nonce + U256::one());
|
||||
txq2.remove_all(tx2.sender().unwrap(), tx2.nonce + U256::one());
|
||||
|
||||
|
||||
// then
|
||||
@ -1042,7 +1053,7 @@ mod test {
|
||||
assert_eq!(txq.status().pending, 3);
|
||||
|
||||
// when
|
||||
txq.remove(&tx.hash(), &default_nonce);
|
||||
txq.remove_invalid(&tx.hash(), &default_nonce);
|
||||
|
||||
// then
|
||||
let stats = txq.status();
|
||||
@ -1152,7 +1163,7 @@ mod test {
|
||||
assert_eq!(txq.status().pending, 2);
|
||||
|
||||
// when
|
||||
txq.remove(&tx1.hash(), &default_nonce);
|
||||
txq.remove_invalid(&tx1.hash(), &default_nonce);
|
||||
assert_eq!(txq.status().pending, 0);
|
||||
assert_eq!(txq.status().future, 1);
|
||||
txq.add(tx1.clone(), &default_nonce).unwrap();
|
||||
@ -1166,8 +1177,6 @@ mod test {
|
||||
#[test]
|
||||
fn should_not_move_to_future_if_state_nonce_is_higher() {
|
||||
// given
|
||||
let next_nonce = |a: &Address| AccountDetails { nonce: default_nonce(a).nonce + U256::one(), balance:
|
||||
!U256::zero() };
|
||||
let mut txq = TransactionQueue::new();
|
||||
let (tx, tx2) = new_txs(U256::from(1));
|
||||
let tx3 = new_tx();
|
||||
@ -1178,7 +1187,8 @@ mod test {
|
||||
assert_eq!(txq.status().pending, 3);
|
||||
|
||||
// when
|
||||
txq.remove(&tx.hash(), &next_nonce);
|
||||
let sender = tx.sender().unwrap();
|
||||
txq.remove_all(sender, default_nonce_val() + U256::one());
|
||||
|
||||
// then
|
||||
let stats = txq.status();
|
||||
@ -1254,7 +1264,7 @@ mod test {
|
||||
assert_eq!(txq.status().future, 2);
|
||||
|
||||
// when
|
||||
txq.remove(&tx1.hash(), &next_nonce);
|
||||
txq.remove_invalid(&tx1.hash(), &next_nonce);
|
||||
|
||||
// then
|
||||
let stats = txq.status();
|
||||
@ -1286,4 +1296,22 @@ mod test {
|
||||
// then
|
||||
assert_eq!(txq.last_nonce(&from), Some(nonce));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_remove_old_transaction_even_if_newer_transaction_was_not_known() {
|
||||
// given
|
||||
let mut txq = TransactionQueue::new();
|
||||
let (tx1, tx2) = new_txs(U256::one());
|
||||
let (nonce1, nonce2) = (tx1.nonce, tx2.nonce);
|
||||
let details1 = |_a: &Address| AccountDetails { nonce: nonce1, balance: !U256::zero() };
|
||||
|
||||
// Insert first transaction
|
||||
txq.add(tx1, &details1).unwrap();
|
||||
|
||||
// when
|
||||
txq.remove_all(tx2.sender().unwrap(), nonce2 + U256::one());
|
||||
|
||||
// then
|
||||
assert!(txq.top_transactions().is_empty());
|
||||
}
|
||||
}
|
||||
|
158
parity/hypervisor/mod.rs
Normal file
@ -0,0 +1,158 @@
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Parity interprocess hypervisor module
|
||||
|
||||
// while not included in binary
|
||||
#![allow(dead_code)]
|
||||
|
||||
pub mod service;
|
||||
|
||||
/// Default value for hypervisor ipc listener
|
||||
pub const HYPERVISOR_IPC_URL: &'static str = "ipc:///tmp/parity-internal-hyper-status.ipc";
|
||||
|
||||
use nanoipc;
|
||||
use std::sync::{Arc,RwLock};
|
||||
use hypervisor::service::*;
|
||||
use std::process::{Command,Child};
|
||||
use std::collections::HashMap;
|
||||
|
||||
type BinaryId = &'static str;
|
||||
|
||||
const BLOCKCHAIN_DB_BINARY: BinaryId = "blockchain";
|
||||
|
||||
pub struct Hypervisor {
|
||||
ipc_addr: String,
|
||||
service: Arc<HypervisorService>,
|
||||
ipc_worker: RwLock<nanoipc::Worker<HypervisorService>>,
|
||||
processes: RwLock<HashMap<BinaryId, Child>>,
|
||||
}
|
||||
|
||||
impl Hypervisor {
|
||||
/// initializes the Hypervisor service with the open ipc socket for incoming clients
|
||||
pub fn new() -> Hypervisor {
|
||||
Hypervisor::with_url(HYPERVISOR_IPC_URL)
|
||||
}
|
||||
|
||||
/// Starts on the specified address for ipc listener
|
||||
fn with_url(addr: &str) -> Hypervisor{
|
||||
Hypervisor::with_url_and_service(addr, HypervisorService::new())
|
||||
}
|
||||
|
||||
/// Starts with the specified address for the ipc listener and
|
||||
/// the specified list of modules in form of created service
|
||||
fn with_url_and_service(addr: &str, service: Arc<HypervisorService>) -> Hypervisor {
|
||||
let worker = nanoipc::Worker::new(&service);
|
||||
Hypervisor{
|
||||
ipc_addr: addr.to_owned(),
|
||||
service: service,
|
||||
ipc_worker: RwLock::new(worker),
|
||||
processes: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Since one binary can host multiple modules
|
||||
/// we match binaries
|
||||
fn match_module(module_id: &IpcModuleId) -> Option<BinaryId> {
|
||||
match *module_id {
|
||||
BLOCKCHAIN_MODULE_ID => Some(BLOCKCHAIN_DB_BINARY),
|
||||
// none means the module is inside the main binary
|
||||
_ => None
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates IPC listener and starts all binaries
|
||||
fn start(&self) {
|
||||
let mut worker = self.ipc_worker.write().unwrap();
|
||||
worker.add_reqrep(&self.ipc_addr).unwrap_or_else(|e| panic!("Hypervisor ipc worker can not start - critical! ({:?})", e));
|
||||
|
||||
for module_id in self.service.module_ids() {
|
||||
self.start_module(module_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Start binary for the specified module
|
||||
/// Does nothing when it is already started or the module is inside the
/// main binary
|
||||
fn start_module(&self, module_id: IpcModuleId) {
|
||||
Self::match_module(&module_id).map(|binary_id| {
|
||||
let mut processes = self.processes.write().unwrap();
|
||||
{
|
||||
if processes.get(binary_id).is_some() {
|
||||
// already started for another module
|
||||
return;
|
||||
}
|
||||
}
|
||||
let child = Command::new(binary_id).spawn().unwrap_or_else(
|
||||
|e| panic!("Hypervisor cannot start binary: {}", e));
|
||||
processes.insert(binary_id, child);
|
||||
});
|
||||
}
|
||||
|
||||
/// Reports if all modules are checked in
|
||||
pub fn modules_ready(&self) -> bool {
|
||||
self.service.unchecked_count() == 0
|
||||
}
|
||||
|
||||
/// Waits for every required module to check in
|
||||
pub fn wait_for_startup(&self) {
|
||||
let mut worker = self.ipc_worker.write().unwrap();
|
||||
while !self.modules_ready() {
|
||||
worker.poll()
|
||||
}
|
||||
}
|
||||
}
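The intended startup sequence, pieced together from the methods above (editor's sketch; since `start` is still private this would live inside the module, and nothing in parity/main.rs wires it up yet in this change):

	// Spawn the configured module binaries, open the REP socket, then block until
	// every module has reported in via `module_ready` over IPC.
	let hypervisor = Hypervisor::new();
	hypervisor.start();
	hypervisor.wait_for_startup();
	assert!(hypervisor.modules_ready());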
|
||||
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::atomic::{AtomicBool,Ordering};
|
||||
use std::sync::Arc;
|
||||
use super::service::*;
|
||||
use nanoipc;
|
||||
|
||||
#[test]
|
||||
fn can_init() {
|
||||
let url = "ipc:///tmp/test-parity-hypervisor-10.ipc";
|
||||
let test_module_id = 8080u64;
|
||||
|
||||
let hypervisor = Hypervisor::with_url_and_service(url, HypervisorService::with_modules(vec![test_module_id]));
|
||||
assert_eq!(false, hypervisor.modules_ready());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_wait_for_startup() {
|
||||
let url = "ipc:///tmp/test-parity-hypervisor-20.ipc";
|
||||
let test_module_id = 8080u64;
|
||||
|
||||
let hypervisor_ready = Arc::new(AtomicBool::new(false));
|
||||
let hypervisor_ready_local = hypervisor_ready.clone();
|
||||
|
||||
::std::thread::spawn(move || {
|
||||
while !hypervisor_ready.load(Ordering::Relaxed) { }
|
||||
|
||||
let client = nanoipc::init_client::<HypervisorServiceClient<_>>(url).unwrap();
|
||||
client.handshake().unwrap();
|
||||
client.module_ready(test_module_id);
|
||||
});
|
||||
|
||||
let hypervisor = Hypervisor::with_url_and_service(url, HypervisorService::with_modules(vec![test_module_id]));
|
||||
hypervisor.start();
|
||||
hypervisor_ready_local.store(true, Ordering::Relaxed);
|
||||
hypervisor.wait_for_startup();
|
||||
|
||||
assert_eq!(true, hypervisor.modules_ready());
|
||||
}
|
||||
}
|
19
parity/hypervisor/service.rs
Normal file
@ -0,0 +1,19 @@
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Parity interprocess hypervisor IPC service
|
||||
|
||||
include!(concat!(env!("OUT_DIR"), "/hypervisor_service_cg.rs"));
|
69
parity/hypervisor/service.rs.in
Normal file
@ -0,0 +1,69 @@
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::{RwLock,Arc};
|
||||
use std::ops::*;
|
||||
use ipc::IpcConfig;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub type IpcModuleId = u64;
|
||||
|
||||
/// Blockchain database module id
|
||||
pub const BLOCKCHAIN_MODULE_ID: IpcModuleId = 2000;
|
||||
|
||||
/// IPC service that handles module management
|
||||
pub struct HypervisorService {
|
||||
check_list: RwLock<HashMap<IpcModuleId, bool>>,
|
||||
}
|
||||
|
||||
#[derive(Ipc)]
|
||||
impl HypervisorService {
|
||||
fn module_ready(&self, module_id: u64) -> bool {
|
||||
let mut check_list = self.check_list.write().unwrap();
|
||||
check_list.get_mut(&module_id).map(|mut status| *status = true);
|
||||
check_list.iter().any(|(_, status)| !status)
|
||||
}
|
||||
}
|
||||
|
||||
impl HypervisorService {
|
||||
/// New service with the default list of modules
|
||||
pub fn new() -> Arc<HypervisorService> {
|
||||
HypervisorService::with_modules(vec![])
|
||||
}
|
||||
|
||||
/// New service with list of modules that will report for being ready
|
||||
pub fn with_modules(module_ids: Vec<IpcModuleId>) -> Arc<HypervisorService> {
|
||||
let mut check_list = HashMap::new();
|
||||
for module_id in module_ids {
|
||||
check_list.insert(module_id, false);
|
||||
}
|
||||
Arc::new(HypervisorService {
|
||||
check_list: RwLock::new(check_list),
|
||||
})
|
||||
}
|
||||
|
||||
/// Number of modules still being waited for check-in
|
||||
pub fn unchecked_count(&self) -> usize {
|
||||
self.check_list.read().unwrap().iter().filter(|&(_, status)| !status).count()
|
||||
}
|
||||
|
||||
/// List of all modules within this service
|
||||
pub fn module_ids(&self) -> Vec<IpcModuleId> {
|
||||
self.check_list.read().unwrap().iter().map(|(module_id, _)| module_id).cloned().collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl ::ipc::IpcConfig for HypervisorService {}
|
@ -37,6 +37,10 @@ extern crate time;
|
||||
extern crate number_prefix;
|
||||
extern crate rpassword;
|
||||
extern crate semver;
|
||||
extern crate ethcore_ipc as ipc;
|
||||
extern crate ethcore_ipc_nano as nanoipc;
|
||||
extern crate serde;
|
||||
extern crate bincode;
|
||||
|
||||
// for price_info.rs
|
||||
#[macro_use] extern crate hyper;
|
||||
@ -73,6 +77,7 @@ use webapp::Server as WebappServer;
|
||||
|
||||
mod price_info;
|
||||
mod upgrade;
|
||||
mod hypervisor;
|
||||
|
||||
fn die_with_message(msg: &str) -> ! {
|
||||
println!("ERROR: {}", msg);
|
||||
@ -133,8 +138,7 @@ API and Console Options:
|
||||
--jsonrpc-interface IP Specify the hostname portion of the JSONRPC API
|
||||
server, IP should be an interface's IP address, or
|
||||
all (all interfaces) or local [default: local].
|
||||
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses
|
||||
[default: null].
|
||||
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses.
|
||||
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC
|
||||
interface. APIS is a comma-delimited list of API
|
||||
name. Possible name are web3, eth and net.
|
||||
@ -242,7 +246,7 @@ struct Args {
|
||||
flag_jsonrpc: bool,
|
||||
flag_jsonrpc_interface: String,
|
||||
flag_jsonrpc_port: u16,
|
||||
flag_jsonrpc_cors: String,
|
||||
flag_jsonrpc_cors: Option<String>,
|
||||
flag_jsonrpc_apis: String,
|
||||
flag_webapp: bool,
|
||||
flag_webapp_port: u16,
|
||||
@ -307,7 +311,7 @@ fn setup_rpc_server(
|
||||
secret_store: Arc<AccountService>,
|
||||
miner: Arc<Miner>,
|
||||
url: &SocketAddr,
|
||||
cors_domain: &str,
|
||||
cors_domain: Option<String>,
|
||||
apis: Vec<&str>,
|
||||
) -> RpcServer {
|
||||
use rpc::v1::*;
|
||||
@ -380,7 +384,7 @@ fn setup_rpc_server(
|
||||
_secret_store: Arc<AccountService>,
|
||||
_miner: Arc<Miner>,
|
||||
_url: &SocketAddr,
|
||||
_cors_domain: &str,
|
||||
_cors_domain: Option<String>,
|
||||
_apis: Vec<&str>,
|
||||
) -> ! {
|
||||
die!("Your Parity version has been compiled without JSON-RPC support.")
|
||||
@ -550,6 +554,7 @@ impl Configuration {
|
||||
let jdb_types = [journaldb::Algorithm::Archive, journaldb::Algorithm::EarlyMerge, journaldb::Algorithm::OverlayRecent, journaldb::Algorithm::RefCounted];
|
||||
for i in jdb_types.into_iter() {
|
||||
let db = journaldb::new(&append_path(&get_db_path(&Path::new(&self.path()), *i, spec.genesis_header().hash()), "state"), *i);
|
||||
trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
|
||||
match (latest_era, db.latest_era()) {
|
||||
(Some(best), Some(this)) if best >= this => {}
|
||||
(_, None) => {}
|
||||
@ -582,7 +587,7 @@ impl Configuration {
|
||||
"auto" => self.find_best_db(spec).unwrap_or(journaldb::Algorithm::OverlayRecent),
|
||||
_ => { die!("Invalid pruning method given."); }
|
||||
};
|
||||
info!("Using state DB of {}", client_config.pruning);
|
||||
trace!(target: "parity", "Using pruning strategy of {}", client_config.pruning);
|
||||
client_config.name = self.args.flag_identity.clone();
|
||||
client_config.queue.max_mem_use = self.args.flag_queue_max_size;
|
||||
client_config
|
||||
@ -601,6 +606,18 @@ impl Configuration {
|
||||
print_version();
|
||||
return;
|
||||
}
|
||||
|
||||
match ::upgrade::upgrade(Some(&self.path())) {
|
||||
Ok(upgrades_applied) => {
|
||||
if upgrades_applied > 0 {
|
||||
println!("Executed {} upgrade scripts - ok", upgrades_applied);
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
die!("Error upgrading parity data: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
if self.args.cmd_daemon {
|
||||
Daemonize::new()
|
||||
.pid_file(self.args.arg_pid_file.clone())
|
||||
@ -621,9 +638,9 @@ impl Configuration {
|
||||
let mut secret_store = SecretStore::new_in(Path::new(&self.keys_path()));
|
||||
if self.args.cmd_new {
|
||||
println!("Please note that password is NOT RECOVERABLE.");
|
||||
println!("Type password: ");
|
||||
print!("Type password: ");
|
||||
let password = read_password().unwrap();
|
||||
println!("Repeat password: ");
|
||||
print!("Repeat password: ");
|
||||
let password_repeat = read_password().unwrap();
|
||||
if password != password_repeat {
|
||||
println!("Passwords do not match!");
|
||||
@ -712,7 +729,7 @@ impl Configuration {
|
||||
self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port)
|
||||
);
|
||||
let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url));
|
||||
let cors_domain = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
|
||||
let cors_domain = self.args.flag_jsonrpc_cors.clone().or(self.args.flag_rpccorsdomain.clone());
|
||||
|
||||
Some(setup_rpc_server(
|
||||
service.client(),
|
||||
@ -720,7 +737,7 @@ impl Configuration {
|
||||
account_service.clone(),
|
||||
miner.clone(),
|
||||
&addr,
|
||||
&cors_domain,
|
||||
cors_domain,
|
||||
apis.split(',').collect()
|
||||
))
|
||||
} else {
|
||||
@ -815,16 +832,6 @@ fn die_with_io_error(e: std::io::Error) -> ! {
|
||||
}
|
||||
|
||||
fn main() {
|
||||
match ::upgrade::upgrade() {
|
||||
Ok(upgrades_applied) => {
|
||||
if upgrades_applied > 0 {
|
||||
println!("Executed {} upgrade scripts - ok", upgrades_applied);
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
die!("Error upgrading parity data: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
Configuration::parse().execute();
|
||||
}
|
||||
|
@ -18,14 +18,15 @@
|
||||
|
||||
use semver::Version;
|
||||
use std::collections::*;
|
||||
use std::fs::File;
|
||||
use std::fs::{File, create_dir_all};
|
||||
use std::env;
|
||||
use std::io::{Read, Write};
|
||||
|
||||
#[cfg_attr(feature="dev", allow(enum_variant_names))]
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
CannotLockVersionFile,
|
||||
CannotCreateConfigPath,
|
||||
CannotWriteVersionFile,
|
||||
CannotUpdateVersionFile,
|
||||
}
|
||||
|
||||
@ -66,7 +67,7 @@ fn dummy_upgrade() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn push_updrades(upgrades: &mut UpgradeList)
|
||||
fn push_upgrades(upgrades: &mut UpgradeList)
|
||||
{
|
||||
// dummy upgrade (remove when the first one is in)
|
||||
upgrades.insert(
|
||||
@ -76,7 +77,7 @@ fn push_updrades(upgrades: &mut UpgradeList)
|
||||
|
||||
fn upgrade_from_version(previous_version: &Version) -> Result<usize, Error> {
|
||||
let mut upgrades = HashMap::new();
|
||||
push_updrades(&mut upgrades);
|
||||
push_upgrades(&mut upgrades);
|
||||
|
||||
let current_version = Version::parse(CURRENT_VERSION).unwrap();
|
||||
|
||||
@ -91,11 +92,15 @@ fn upgrade_from_version(previous_version: &Version) -> Result<usize, Error> {
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
fn with_locked_version<F>(script: F) -> Result<usize, Error>
|
||||
fn with_locked_version<F>(db_path: Option<&str>, script: F) -> Result<usize, Error>
|
||||
where F: Fn(&Version) -> Result<usize, Error>
|
||||
{
|
||||
let mut path = db_path.map_or({
|
||||
let mut path = env::home_dir().expect("Applications should have a home dir");
|
||||
path.push(".parity");
|
||||
path
|
||||
}, |s| ::std::path::PathBuf::from(s));
|
||||
try!(create_dir_all(&path).map_err(|_| Error::CannotCreateConfigPath));
|
||||
path.push("ver.lock");
|
||||
|
||||
let version =
|
||||
@ -108,7 +113,7 @@ fn with_locked_version<F>(script: F) -> Result<usize, Error>
|
||||
})
|
||||
.unwrap_or_else(|| Version::parse("0.9.0").unwrap());
|
||||
|
||||
let mut lock = try!(File::create(&path).map_err(|_| Error::CannotLockVersionFile));
|
||||
let mut lock = try!(File::create(&path).map_err(|_| Error::CannotWriteVersionFile));
|
||||
let result = script(&version);
|
||||
|
||||
let written_version = Version::parse(CURRENT_VERSION).unwrap();
|
||||
@ -116,8 +121,8 @@ fn with_locked_version<F>(script: F) -> Result<usize, Error>
|
||||
result
|
||||
}
|
||||
|
||||
pub fn upgrade() -> Result<usize, Error> {
|
||||
with_locked_version(|ver| {
|
||||
pub fn upgrade(db_path: Option<&str>) -> Result<usize, Error> {
|
||||
with_locked_version(db_path, |ver| {
|
||||
upgrade_from_version(ver)
|
||||
})
|
||||
}
|
||||
|
@ -58,8 +58,8 @@ impl RpcServer {
|
||||
}
|
||||
|
||||
/// Start server asynchronously and returns result with `Server` handle on success or an error.
|
||||
pub fn start_http(&self, addr: &SocketAddr, cors_domain: &str) -> Result<Server, RpcServerError> {
|
||||
pub fn start_http(&self, addr: &SocketAddr, cors_domain: Option<String>) -> Result<Server, RpcServerError> {
|
||||
let cors_domain = cors_domain.to_owned();
|
||||
Server::start(addr, self.handler.clone(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain))
|
||||
Server::start(addr, self.handler.clone(), cors_domain.map(jsonrpc_http_server::AccessControlAllowOrigin::Value))
|
||||
}
|
||||
}
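With the CORS domain now optional, callers pass an `Option<String>` instead of relying on the old `"null"` default (editor's sketch; `server` and `addr` are placeholders):

	// Either pass an explicit CORS domain...
	let _handle = server.start_http(&addr, Some("http://localhost:8080".to_owned())).unwrap();
	// ...or pass None so no Access-Control-Allow-Origin header is emitted:
	// let _handle = server.start_http(&addr, None).unwrap();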
|
||||
|
@ -40,14 +40,6 @@ use v1::helpers::{PollFilter, PollManager, ExternalMinerService, ExternalMiner};
|
||||
use util::keys::store::AccountProvider;
|
||||
use serde;
|
||||
|
||||
fn default_gas() -> U256 {
|
||||
U256::from(21_000)
|
||||
}
|
||||
|
||||
fn default_call_gas() -> U256 {
|
||||
U256::from(50_000_000)
|
||||
}
|
||||
|
||||
/// Eth rpc implementation.
|
||||
pub struct EthClient<C, S, A, M, EM = ExternalMiner>
|
||||
where C: BlockChainClient,
|
||||
@ -180,7 +172,7 @@ impl<C, S, A, M, EM> EthClient<C, S, A, M, EM>
|
||||
Ok(EthTransaction {
|
||||
nonce: request.nonce.unwrap_or_else(|| client.nonce(&from)),
|
||||
action: request.to.map_or(Action::Create, Action::Call),
|
||||
gas: request.gas.unwrap_or_else(default_call_gas),
|
||||
gas: request.gas.unwrap_or(U256::from(50_000_000)),
|
||||
gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()),
|
||||
value: request.value.unwrap_or_else(U256::zero),
|
||||
data: request.data.map_or_else(Vec::new, |d| d.to_vec())
|
||||
@ -498,7 +490,7 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
|
||||
.map(|nonce| nonce + U256::one()))
|
||||
.unwrap_or_else(|| client.nonce(&request.from)),
|
||||
action: request.to.map_or(Action::Create, Action::Call),
|
||||
gas: request.gas.unwrap_or_else(default_gas),
|
||||
gas: request.gas.unwrap_or_else(|| miner.sensible_gas_limit()),
|
||||
gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()),
|
||||
value: request.value.unwrap_or_else(U256::zero),
|
||||
data: request.data.map_or_else(Vec::new, |d| d.to_vec()),
|
||||
@ -524,6 +516,7 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
|
||||
}
|
||||
|
||||
fn call(&self, params: Params) -> Result<Value, Error> {
|
||||
trace!(target: "jsonrpc", "call: {:?}", params);
|
||||
from_params_discard_second(params).and_then(|(request, )| {
|
||||
let signed = try!(self.sign_call(request));
|
||||
let client = take_weak!(self.client);
|
||||
|
@ -15,6 +15,7 @@
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Ethcore-specific rpc implementation.
|
||||
use util::{U256, Address};
|
||||
use std::sync::{Arc, Weak};
|
||||
use jsonrpc_core::*;
|
||||
use ethminer::{MinerService};
|
||||
@ -37,6 +38,39 @@ impl<M> EthcoreClient<M> where M: MinerService {
|
||||
}
|
||||
|
||||
impl<M> Ethcore for EthcoreClient<M> where M: MinerService + 'static {
|
||||
|
||||
fn set_min_gas_price(&self, params: Params) -> Result<Value, Error> {
|
||||
from_params::<(U256,)>(params).and_then(|(gas_price,)| {
|
||||
take_weak!(self.miner).set_minimal_gas_price(gas_price);
|
||||
to_value(&true)
|
||||
})
|
||||
}
|
||||
|
||||
fn set_gas_floor_target(&self, params: Params) -> Result<Value, Error> {
|
||||
from_params::<(U256,)>(params).and_then(|(gas_floor_target,)| {
|
||||
take_weak!(self.miner).set_gas_floor_target(gas_floor_target);
|
||||
to_value(&true)
|
||||
})
|
||||
}
|
||||
|
||||
fn set_extra_data(&self, params: Params) -> Result<Value, Error> {
|
||||
from_params::<(Bytes,)>(params).and_then(|(extra_data,)| {
|
||||
take_weak!(self.miner).set_extra_data(extra_data.to_vec());
|
||||
to_value(&true)
|
||||
})
|
||||
}
|
||||
|
||||
fn set_author(&self, params: Params) -> Result<Value, Error> {
|
||||
from_params::<(Address,)>(params).and_then(|(author,)| {
|
||||
take_weak!(self.miner).set_author(author);
|
||||
to_value(&true)
|
||||
})
|
||||
}
|
||||
|
||||
fn min_gas_price(&self, _: Params) -> Result<Value, Error> {
|
||||
to_value(&take_weak!(self.miner).minimal_gas_price())
|
||||
}
|
||||
|
||||
fn extra_data(&self, _: Params) -> Result<Value, Error> {
|
||||
to_value(&Bytes::new(take_weak!(self.miner).extra_data()))
|
||||
}
|
||||
|
@ -15,10 +15,13 @@
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::str::FromStr;
|
||||
use jsonrpc_core::IoHandler;
|
||||
use v1::{Ethcore, EthcoreClient};
|
||||
use v1::tests::helpers::{TestMinerService};
|
||||
use ethminer::MinerService;
|
||||
use v1::tests::helpers::TestMinerService;
|
||||
use util::numbers::*;
|
||||
use rustc_serialize::hex::FromHex;
|
||||
|
||||
|
||||
fn miner_service() -> Arc<TestMinerService> {
|
||||
@ -52,3 +55,71 @@ fn rpc_ethcore_gas_floor_target() {
|
||||
assert_eq!(io.handle_request(request), Some(response.to_owned()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_ethcore_min_gas_price() {
|
||||
let miner = miner_service();
|
||||
let ethcore = EthcoreClient::new(&miner).to_delegate();
|
||||
let io = IoHandler::new();
|
||||
io.add_delegate(ethcore);
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_minGasPrice", "params": [], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":"0x01312d00","id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request(request), Some(response.to_owned()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_ethcore_set_min_gas_price() {
|
||||
let miner = miner_service();
|
||||
let ethcore = EthcoreClient::new(&miner).to_delegate();
|
||||
let io = IoHandler::new();
|
||||
io.add_delegate(ethcore);
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setMinGasPrice", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request(request), Some(response.to_owned()));
|
||||
assert_eq!(miner.minimal_gas_price(), U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_ethcore_set_gas_floor_target() {
|
||||
let miner = miner_service();
|
||||
let ethcore = EthcoreClient::new(&miner).to_delegate();
|
||||
let io = IoHandler::new();
|
||||
io.add_delegate(ethcore);
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setGasFloorTarget", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request(request), Some(response.to_owned()));
|
||||
assert_eq!(miner.gas_floor_target(), U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_ethcore_set_extra_data() {
|
||||
let miner = miner_service();
|
||||
let ethcore = EthcoreClient::new(&miner).to_delegate();
|
||||
let io = IoHandler::new();
|
||||
io.add_delegate(ethcore);
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setExtraData", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request(request), Some(response.to_owned()));
|
||||
assert_eq!(miner.extra_data(), "cd1722f3947def4cf144679da39c4c32bdc35681".from_hex().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_ethcore_set_author() {
|
||||
let miner = miner_service();
|
||||
let ethcore = EthcoreClient::new(&miner).to_delegate();
|
||||
let io = IoHandler::new();
|
||||
io.add_delegate(ethcore);
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setAuthor", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request(request), Some(response.to_owned()));
|
||||
assert_eq!(miner.author(), Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
|
||||
}
|
||||
|
@ -16,7 +16,7 @@
|
||||
|
||||
//! Test implementation of miner service.
|
||||
|
||||
use util::{Address, H256, Bytes, U256};
|
||||
use util::{Address, H256, Bytes, U256, FixedHash};
|
||||
use util::standard::*;
|
||||
use ethcore::error::Error;
|
||||
use ethcore::client::BlockChainClient;
|
||||
@ -34,6 +34,11 @@ pub struct TestMinerService {
|
||||
pub pending_transactions: Mutex<HashMap<H256, SignedTransaction>>,
|
||||
/// Last nonces.
|
||||
pub last_nonces: RwLock<HashMap<Address, U256>>,
|
||||
|
||||
min_gas_price: RwLock<U256>,
|
||||
gas_floor_target: RwLock<U256>,
|
||||
author: RwLock<Address>,
|
||||
extra_data: RwLock<Bytes>,
|
||||
}
|
||||
|
||||
impl Default for TestMinerService {
|
||||
@ -43,6 +48,10 @@ impl Default for TestMinerService {
|
||||
latest_closed_block: Mutex::new(None),
|
||||
pending_transactions: Mutex::new(HashMap::new()),
|
||||
last_nonces: RwLock::new(HashMap::new()),
|
||||
min_gas_price: RwLock::new(U256::from(20_000_000)),
|
||||
gas_floor_target: RwLock::new(U256::from(12345)),
|
||||
author: RwLock::new(Address::zero()),
|
||||
extra_data: RwLock::new(vec![1, 2, 3, 4]),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -58,6 +67,39 @@ impl MinerService for TestMinerService {
|
||||
}
|
||||
}
|
||||
|
||||
fn set_author(&self, author: Address) {
|
||||
*self.author.write().unwrap() = author;
|
||||
}
|
||||
|
||||
fn set_extra_data(&self, extra_data: Bytes) {
|
||||
*self.extra_data.write().unwrap() = extra_data;
|
||||
}
|
||||
|
||||
/// Set the gas limit we wish to target when sealing a new block.
|
||||
fn set_gas_floor_target(&self, target: U256) {
|
||||
*self.gas_floor_target.write().unwrap() = target;
|
||||
}
|
||||
|
||||
fn set_minimal_gas_price(&self, min_gas_price: U256) {
|
||||
*self.min_gas_price.write().unwrap() = min_gas_price;
|
||||
}
|
||||
|
||||
fn author(&self) -> Address {
|
||||
*self.author.read().unwrap()
|
||||
}
|
||||
|
||||
fn minimal_gas_price(&self) -> U256 {
|
||||
*self.min_gas_price.read().unwrap()
|
||||
}
|
||||
|
||||
fn extra_data(&self) -> Bytes {
|
||||
self.extra_data.read().unwrap().clone()
|
||||
}
|
||||
|
||||
fn gas_floor_target(&self) -> U256 {
|
||||
*self.gas_floor_target.read().unwrap()
|
||||
}
|
||||
|
||||
/// Imports transactions to transaction queue.
|
||||
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, _fetch_account: T) -> Vec<Result<(), Error>>
|
||||
where T: Fn(&Address) -> AccountDetails {
|
||||
@ -111,12 +153,4 @@ impl MinerService for TestMinerService {
|
||||
fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
fn extra_data(&self) -> Bytes {
|
||||
vec![1, 2, 3, 4]
|
||||
}
|
||||
|
||||
fn gas_floor_target(&self) -> U256 {
|
||||
U256::from(12345)
|
||||
}
|
||||
}
|
||||
|
@ -20,17 +20,39 @@ use jsonrpc_core::*;

/// Ethcore-specific rpc interface.
pub trait Ethcore: Sized + Send + Sync + 'static {

	/// Sets new minimal gas price for mined blocks.
	fn set_min_gas_price(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

	/// Sets new gas floor target for mined blocks.
	fn set_gas_floor_target(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

	/// Sets new extra data for mined blocks.
	fn set_extra_data(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

	/// Sets new author for mined block.
	fn set_author(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

	/// Returns mining extra data.
	fn extra_data(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

	/// Returns mining gas floor target.
	fn gas_floor_target(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

	/// Returns minimal gas price for transaction to be included in queue.
	fn min_gas_price(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

	/// Should be used to convert object to io delegate.
	fn to_delegate(self) -> IoDelegate<Self> {
		let mut delegate = IoDelegate::new(Arc::new(self));
		delegate.add_method("ethcore_setMinGasPrice", Ethcore::set_min_gas_price);
		delegate.add_method("ethcore_setGasFloorTarget", Ethcore::set_gas_floor_target);
		delegate.add_method("ethcore_setExtraData", Ethcore::set_extra_data);
		delegate.add_method("ethcore_setAuthor", Ethcore::set_author);

		delegate.add_method("ethcore_extraData", Ethcore::extra_data);
		delegate.add_method("ethcore_gasFloorTarget", Ethcore::gas_floor_target);
		delegate.add_method("ethcore_minGasPrice", Ethcore::min_gas_price);
		delegate
	}
}
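Each method on the trait has a default body that answers with `rpc_unimplemented!()`, so a concrete server only overrides the calls it supports and then exposes them via `to_delegate()`. A rough implementation sketch, assuming a hypothetical `EthcoreClient` wrapper and the `from_params` helper from `jsonrpc_core` for decoding the positional argument (neither is part of this diff):

// Hypothetical sketch only; EthcoreClient is not defined in this change.
struct EthcoreClient<M: MinerService + Send + Sync + 'static> {
	miner: Arc<M>,
}

impl<M: MinerService + Send + Sync + 'static> Ethcore for EthcoreClient<M> {
	fn set_min_gas_price(&self, params: Params) -> Result<Value, Error> {
		// Decode a single positional argument, e.g. ["0x174876e800"].
		from_params::<(U256,)>(params).map(|(price,)| {
			self.miner.set_minimal_gas_price(price);
			Value::Bool(true)
		})
	}
	// All other methods fall back to the rpc_unimplemented!() defaults.
}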
@ -61,7 +61,7 @@ pub struct Filter {
impl Into<EthFilter> for Filter {
	fn into(self) -> EthFilter {
		EthFilter {
			from_block: self.from_block.map_or_else(|| BlockId::Earliest, Into::into),
			from_block: self.from_block.map_or_else(|| BlockId::Latest, Into::into),
			to_block: self.to_block.map_or_else(|| BlockId::Latest, Into::into),
			address: self.address.and_then(|address| match address {
				VariadicValue::Null => None,
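The only functional change in this hunk is the default chosen when `from_block` is absent: `Option::map_or_else` takes the fallback from its first closure and otherwise converts the present value. A self-contained illustration of that pattern with plain integer types (values are arbitrary):

fn main() {
	let missing: Option<u32> = None;
	let given: Option<u32> = Some(7);

	// The first closure supplies the default for None; the second converts Some(v).
	assert_eq!(missing.map_or_else(|| 100u64, Into::into), 100);
	assert_eq!(given.map_or_else(|| 100u64, Into::into), 7);
}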
10 rustfmt.toml
@ -1,15 +1,17 @@
verbose=false
max_width=150
ideal_width=120
max_width=1000
ideal_width=1000
tabs_spaces=4
fn_call_width=100
fn_call_width=1000
struct_lit_width=32
fn_arg_indent="Tabbed"
single_line_if_else=true
where_indent="Visual"
where_trailing_comma=true
chain_base_indent="Inherit"
chain_indent="Tabbed"
chain_indent="Inherit"
reorder_imports=true
format_strings=false
chain_overflow_last=false
hard_tabs=true
wrap_match_arms=false
@ -1694,21 +1694,34 @@ mod tests {
		let good_blocks = vec![client.block_hash_delta_minus(2)];
		let retracted_blocks = vec![client.block_hash_delta_minus(1)];

		// Add some balance to clients
		// Add some balance to clients and reset nonces
		for h in &[good_blocks[0], retracted_blocks[0]] {
			let block = client.block(BlockId::Hash(*h)).unwrap();
			let view = BlockView::new(&block);
			client.set_balance(view.transactions()[0].sender().unwrap(), U256::from(1_000_000_000));
			client.set_nonce(view.transactions()[0].sender().unwrap(), U256::from(0));
		}

		let mut queue = VecDeque::new();
		let mut io = TestIo::new(&mut client, &mut queue, None);

		// when
		{
			let mut queue = VecDeque::new();
			let mut io = TestIo::new(&mut client, &mut queue, None);
			sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks);
			assert_eq!(sync.miner.status().transactions_in_future_queue, 0);
			assert_eq!(sync.miner.status().transactions_in_pending_queue, 1);
			sync.chain_new_blocks(&mut io, &good_blocks, &[], &[], &retracted_blocks);
		}
		// We need to update nonce status (because we say that the block has been imported)
		for h in &[good_blocks[0]] {
			let block = client.block(BlockId::Hash(*h)).unwrap();
			let view = BlockView::new(&block);
			client.set_nonce(view.transactions()[0].sender().unwrap(), U256::from(1));
		}
		{
			let mut queue = VecDeque::new();
			let mut io = TestIo::new(&mut client, &mut queue, None);
			sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks);
		}

		// then
		let status = sync.miner.status();
@ -1735,7 +1748,7 @@ mod tests {
		sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks);
		assert_eq!(sync.miner.status().transactions_in_future_queue, 0);
		assert_eq!(sync.miner.status().transactions_in_pending_queue, 0);
		sync.chain_new_blocks(&mut io, &good_blocks, &[], &[], &retracted_blocks);
		sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks);

		// then
		let status = sync.miner.status();
@ -17,7 +17,7 @@ ethcore-rpc = { path = "../rpc" }
ethcore-util = { path = "../util" }
parity-webapp = { git = "https://github.com/tomusdrw/parity-webapp.git" }
# List of apps
parity-status = { git = "https://github.com/tomusdrw/parity-status.git", version = "0.1.5" }
parity-status = { git = "https://github.com/tomusdrw/parity-status.git", version = "0.1.7" }
parity-wallet = { git = "https://github.com/tomusdrw/parity-wallet.git", optional = true }
clippy = { version = "0.0.63", optional = true }