commit d4e7eafede

Merge branch 'master' into tx_queue_integration

Conflicts:
	sync/src/transaction_queue.rs
Cargo.lock (generated): 34 changes

@@ -15,7 +15,9 @@ dependencies = [
 "fdlimit 0.1.0",
 "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"rpassword 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -217,6 +219,7 @@ dependencies = [
 "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
 "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -236,10 +239,11 @@ dependencies = [
 "ethcore 0.9.99",
 "ethcore-util 0.9.99",
 "ethsync 0.9.99",
-"jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -295,6 +299,7 @@ dependencies = [
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
 "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -409,7 +414,7 @@ dependencies = [
 
 [[package]]
 name = "jsonrpc-core"
-version = "1.2.0"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
 "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -420,11 +425,11 @@ dependencies = [
 
 [[package]]
 name = "jsonrpc-http-server"
-version = "2.1.0"
+version = "3.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
 "hyper 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -699,6 +704,17 @@ dependencies = [
 "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)",
 ]
 
+[[package]]
+name = "rpassword"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "rust-crypto"
 version = "0.2.34"
@@ -832,6 +848,14 @@ dependencies = [
 "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "termios"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "time"
 version = "0.1.34"
Cargo.toml: 28 changes

@@ -4,6 +4,10 @@ name = "parity"
 version = "0.9.99"
 license = "GPL-3.0"
 authors = ["Ethcore <admin@ethcore.io>"]
+build = "build.rs"
+
+[build-dependencies]
+rustc_version = "0.1"
 
 [dependencies]
 log = "0.3"
@@ -12,22 +16,30 @@ rustc-serialize = "0.3"
 docopt = "0.6"
 time = "0.1"
 ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" }
-clippy = { version = "0.0.44", optional = true }
-ethcore-util = { path = "util" }
-ethcore = { path = "ethcore" }
-ethsync = { path = "sync" }
-ethcore-rpc = { path = "rpc", optional = true }
 fdlimit = { path = "util/fdlimit" }
 daemonize = "0.2"
-ethcore-devtools = { path = "devtools" }
 number_prefix = "0.2"
+clippy = { version = "0.0.44", optional = true }
+ethcore = { path = "ethcore" }
+ethcore-util = { path = "util" }
+ethsync = { path = "sync" }
+ethcore-devtools = { path = "devtools" }
+ethcore-rpc = { path = "rpc", optional = true }
+rpassword = "0.1"
+
+[dev-dependencies]
+ethcore = { path = "ethcore", features = ["dev"] }
+ethcore-util = { path = "util", features = ["dev"] }
+ethsync = { path = "sync", features = ["dev"] }
+ethcore-rpc = { path = "rpc", features = ["dev"] }
 
 [features]
 default = ["rpc"]
 rpc = ["ethcore-rpc"]
-dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
+dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
+dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"]
 travis-beta = ["ethcore/json-tests"]
-travis-nightly = ["ethcore/json-tests", "dev"]
+travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"]
 
 [[bin]]
 path = "parity/main.rs"
build.rs: 25 changes (new file)

@@ -0,0 +1,25 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
+
+use rustc_version::{version_meta, Channel};
+
+fn main() {
+	if let Channel::Nightly = version_meta().channel {
+		println!("cargo:rustc-cfg=nightly");
+	}
+}
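Note: this build script's only job is to emit a `nightly` cfg flag when the compiler is a nightly toolchain; the source hunks later in this diff gate their clippy attributes on that flag. A minimal sketch of the consuming side, using the exact attribute form this commit switches to:

	// In a crate root: enable the clippy plugin only when the compiler is
	// nightly (cfg set by build.rs) and the "dev" feature is requested.
	#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
	#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]

On a stable compiler the cfg is absent, so both attributes compile to nothing and the crate still builds.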
@@ -5,6 +5,10 @@ license = "GPL-3.0"
 name = "ethcore"
 version = "0.9.99"
 authors = ["Ethcore <admin@ethcore.io>"]
+build = "build.rs"
+
+[build-dependencies]
+rustc_version = "0.1"
 
 [dependencies]
 log = "0.3"
@@ -27,5 +31,5 @@ jit = ["evmjit"]
 evm-debug = []
 json-tests = []
 test-heavy = []
-dev = ["clippy"]
+dev = []
 default = []
ethcore/build.rs: 25 changes (new file)

@@ -0,0 +1,25 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
+
+use rustc_version::{version_meta, Channel};
+
+fn main() {
+	if let Channel::Nightly = version_meta().channel {
+		println!("cargo:rustc-cfg=nightly");
+	}
+}
@@ -24,7 +24,7 @@ pub type LogBloom = H2048;
 /// Constant 2048-bit datum for 0. Often used as a default.
 pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]);
 
-#[cfg_attr(feature="dev", allow(enum_variant_names))]
+#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
 /// Semantic boolean for when a seal/signature is included.
 pub enum Seal {
 	/// The seal/signature is included.
@@ -16,7 +16,7 @@
 
 //! Blockchain block.
 
-#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
+#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
 
 use common::*;
 use engine::*;
@@ -121,7 +121,7 @@ struct QueueSignal {
 }
 
 impl QueueSignal {
-	#[cfg_attr(feature="dev", allow(bool_comparison))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))]
 	fn set(&self) {
 		if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
 			self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
@@ -320,6 +320,9 @@ impl BlockQueue {
 
 	/// Mark given block and all its children as bad. Stops verification.
 	pub fn mark_as_bad(&mut self, block_hashes: &[H256]) {
+		if block_hashes.is_empty() {
+			return;
+		}
 		let mut verification_lock = self.verification.lock().unwrap();
 		let mut processing = self.processing.write().unwrap();
 
@@ -345,6 +348,9 @@ impl BlockQueue {
 
 	/// Mark given block as processed
	pub fn mark_as_good(&mut self, block_hashes: &[H256]) {
+		if block_hashes.is_empty() {
+			return;
+		}
 		let mut processing = self.processing.write().unwrap();
 		for hash in block_hashes {
 			processing.remove(&hash);
@@ -884,7 +884,7 @@ mod tests {
 	}
 
 	#[test]
-	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
 	fn test_find_uncles() {
 		let mut canon_chain = ChainGenerator::default();
 		let mut finalizer = BlockFinalizer::default();
@@ -922,7 +922,7 @@ mod tests {
 	}
 
 	#[test]
-	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
 	fn test_small_fork() {
 		let mut canon_chain = ChainGenerator::default();
 		let mut finalizer = BlockFinalizer::default();
@@ -29,7 +29,7 @@ pub trait ChainIterator: Iterator + Sized {
 	/// Blocks generated by fork will have lower difficulty than current chain.
 	fn fork(&self, fork_number: usize) -> Fork<Self> where Self: Clone;
 	/// Should be called to make every consecutive block have given bloom.
-	fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self>;
+	fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self>;
 	/// Should be called to complete block. Without complete, block may have incorrect hash.
 	fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>;
 	/// Completes and generates block.
@@ -44,7 +44,7 @@ impl<I> ChainIterator for I where I: Iterator + Sized {
 		}
 	}
 
-	fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self> {
+	fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self> {
 		Bloom {
 			iter: self,
 			bloom: bloom
@@ -410,9 +410,13 @@ impl<V> Client<V> where V: Verifier {
 
 		{
 			let mut block_queue = self.block_queue.write().unwrap();
+			if !bad_blocks.is_empty() {
 				block_queue.mark_as_bad(&bad_blocks);
+			}
+			if !good_blocks.is_empty() {
 				block_queue.mark_as_good(&good_blocks);
+			}
 		}
 
 		{
 			let block_queue = self.block_queue.read().unwrap();
@@ -202,7 +202,7 @@ impl Engine for Ethash {
 	}
 }
 
-#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self
+#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self
 impl Ethash {
 	fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 {
 		const EXP_DIFF_PERIOD: u64 = 100000;
@@ -243,7 +243,7 @@ struct CodeReader<'a> {
 	code: &'a Bytes
 }
 
-#[cfg_attr(feature="dev", allow(len_without_is_empty))]
+#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))]
 impl<'a> CodeReader<'a> {
 	/// Get `no_of_bytes` from code and convert to U256. Move PC
 	fn read(&mut self, no_of_bytes: usize) -> U256 {
@@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> {
 	}
 }
 
-#[cfg_attr(feature="dev", allow(enum_variant_names))]
+#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
 enum InstructionCost {
 	Gas(U256),
 	GasMem(U256, U256),
@@ -347,7 +347,7 @@ impl evm::Evm for Interpreter {
 }
 
 impl Interpreter {
-	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
 	fn get_gas_cost_mem(&self,
 		ext: &evm::Ext,
 		instruction: Instruction,
@@ -25,9 +25,8 @@ struct FakeLogEntry {
 }
 
 #[derive(PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature="dev", allow(enum_variant_names))] // Common prefix is C ;)
 enum FakeCallType {
-	CALL, CREATE
+	Call, Create
 }
 
 #[derive(PartialEq, Eq, Hash, Debug)]
@@ -94,7 +93,7 @@ impl Ext for FakeExt {
 
 	fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult {
 		self.calls.insert(FakeCall {
-			call_type: FakeCallType::CREATE,
+			call_type: FakeCallType::Create,
 			gas: *gas,
 			sender_address: None,
 			receive_address: None,
@@ -115,7 +114,7 @@ impl Ext for FakeExt {
 		_output: &mut [u8]) -> MessageCallResult {
 
 		self.calls.insert(FakeCall {
-			call_type: FakeCallType::CALL,
+			call_type: FakeCallType::Call,
 			gas: *gas,
 			sender_address: Some(sender_address.clone()),
 			receive_address: Some(receive_address.clone()),
@@ -909,7 +908,7 @@ fn test_calls(factory: super::Factory) {
 	};
 
 	assert_set_contains(&ext.calls, &FakeCall {
-		call_type: FakeCallType::CALL,
+		call_type: FakeCallType::Call,
 		gas: U256::from(2556),
 		sender_address: Some(address.clone()),
 		receive_address: Some(code_address.clone()),
@@ -918,7 +917,7 @@ fn test_calls(factory: super::Factory) {
 		code_address: Some(code_address.clone())
 	});
 	assert_set_contains(&ext.calls, &FakeCall {
-		call_type: FakeCallType::CALL,
+		call_type: FakeCallType::Call,
 		gas: U256::from(2556),
 		sender_address: Some(address.clone()),
 		receive_address: Some(address.clone()),
@@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> {
 		self.state.code(address).unwrap_or_else(|| vec![])
 	}
 
-	#[cfg_attr(feature="dev", allow(match_ref_pats))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
 	fn ret(&mut self, gas: &U256, data: &[u8]) -> Result<U256, evm::Error> {
 		match &mut self.output {
 			&mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe {
@@ -15,16 +15,16 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 #![warn(missing_docs)]
-#![cfg_attr(feature="dev", feature(plugin))]
-#![cfg_attr(feature="dev", plugin(clippy))]
+#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
+#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
 
 // Clippy config
 // TODO [todr] not really sure
-#![cfg_attr(feature="dev", allow(needless_range_loop))]
+#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
 // Shorter than if-else
-#![cfg_attr(feature="dev", allow(match_bool))]
+#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
 // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
-#![cfg_attr(feature="dev", allow(clone_on_copy))]
+#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]
 
 //! Ethcore library
 //!
@@ -117,12 +117,11 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
 		}
 	}
 
-	#[cfg_attr(feature="dev", allow(match_ref_pats))]
-	#[cfg_attr(feature="dev", allow(single_match))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
 	fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
-		if let &UserMessage(ref message) = net_message {
-			match message {
-				&SyncMessage::BlockVerified => {
+		if let UserMessage(ref message) = *net_message {
+			match *message {
+				SyncMessage::BlockVerified => {
 					self.client.import_verified_blocks(&io.channel());
 				},
 				_ => {}, // ignore other messages
@@ -99,7 +99,7 @@ pub struct Spec {
 	genesis_state: PodState,
 }
 
-#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
+#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
 impl Spec {
 	/// Convert this object into a boxed Engine of the right underlying type.
 	// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
@@ -224,7 +224,7 @@ impl State {
 
 	/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
 	/// `accounts` is mutable because we may need to commit the code or storage and record that.
-	#[cfg_attr(feature="dev", allow(match_ref_pats))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
 	pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap<Address, Option<Account>>) {
 		// first, commit the sub trees.
 		// TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?
@@ -80,7 +80,7 @@ impl Transaction {
 }
 
 impl FromJson for SignedTransaction {
-	#[cfg_attr(feature="dev", allow(single_char_pattern))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))]
 	fn from_json(json: &Json) -> SignedTransaction {
 		let t = Transaction {
 			nonce: xjson!(&json["nonce"]),
@@ -17,9 +17,11 @@
 pub mod verification;
 pub mod verifier;
 mod canon_verifier;
+#[cfg(test)]
 mod noop_verifier;
 
 pub use self::verification::*;
 pub use self::verifier::Verifier;
 pub use self::canon_verifier::CanonVerifier;
+#[cfg(test)]
 pub use self::noop_verifier::NoopVerifier;
hook.sh: 2 changes

@@ -1,3 +1,3 @@
 #!/bin/sh
-echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev" > ./.git/hooks/pre-push
+echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push
 chmod +x ./.git/hooks/pre-push
@@ -17,8 +17,8 @@
 //! Ethcore client application.
 
 #![warn(missing_docs)]
-#![cfg_attr(feature="dev", feature(plugin))]
-#![cfg_attr(feature="dev", plugin(clippy))]
+#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
+#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
 extern crate docopt;
 extern crate rustc_serialize;
 extern crate ethcore_util as util;
@@ -32,6 +32,7 @@ extern crate fdlimit;
 extern crate daemonize;
 extern crate time;
 extern crate number_prefix;
+extern crate rpassword;
 
 #[cfg(feature = "rpc")]
 extern crate ethcore_rpc as rpc;
@@ -43,7 +44,7 @@ use std::path::PathBuf;
 use env_logger::LogBuilder;
 use ctrlc::CtrlC;
 use util::*;
-use util::panics::MayPanic;
+use util::panics::{MayPanic, ForwardPanic, PanicHandler};
 use ethcore::spec::*;
 use ethcore::client::*;
 use ethcore::service::{ClientService, NetSyncMessage};
@@ -70,6 +71,7 @@ Parity. Ethereum Client.
 
 Usage:
   parity daemon <pid-file> [options] [ --no-bootstrap | <enode>... ]
+  parity account (new | list)
   parity [options] [ --no-bootstrap | <enode>... ]
 
 Protocol Options:
@@ -126,6 +128,9 @@ Miscellaneous Options:
 #[derive(Debug, RustcDecodable)]
 struct Args {
 	cmd_daemon: bool,
+	cmd_account: bool,
+	cmd_new: bool,
+	cmd_list: bool,
 	arg_pid_file: String,
 	arg_enode: Vec<String>,
 	flag_chain: String,
@@ -190,10 +195,10 @@ fn setup_log(init: &Option<String>) {
 }
 
 #[cfg(feature = "rpc")]
-fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_domain: &str, apis: Vec<&str>) {
+fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option<Arc<PanicHandler>> {
 	use rpc::v1::*;
 
-	let mut server = rpc::HttpServer::new(1);
+	let server = rpc::RpcServer::new();
 	for api in apis.into_iter() {
 		match api {
 			"web3" => server.add_delegate(Web3Client::new().to_delegate()),
@@ -207,11 +212,12 @@ fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_dom
 			}
 		}
 	}
-	server.start_async(url, cors_domain);
+	Some(server.start_http(url, cors_domain, 1))
 }
 
 #[cfg(not(feature = "rpc"))]
-fn setup_rpc_server(_client: Arc<Client>, _sync: Arc<EthSync>, _url: &str) {
+fn setup_rpc_server(_client: Arc<Client>, _sync: Arc<EthSync>, _url: &str) -> Option<Arc<PanicHandler>> {
+	None
 }
 
 fn print_version() {
@@ -287,7 +293,7 @@ impl Configuration {
 		}
 	}
 
-	#[cfg_attr(feature="dev", allow(useless_format))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(useless_format))]
 	fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
 		let mut listen_address = None;
 		let mut public_address = None;
|
|||||||
.start()
|
.start()
|
||||||
.unwrap_or_else(|e| die!("Couldn't daemonize; {}", e));
|
.unwrap_or_else(|e| die!("Couldn't daemonize; {}", e));
|
||||||
}
|
}
|
||||||
|
if self.args.cmd_account {
|
||||||
|
self.execute_account_cli();
|
||||||
|
return;
|
||||||
|
}
|
||||||
self.execute_client();
|
self.execute_client();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn execute_account_cli(&self) {
|
||||||
|
use util::keys::store::SecretStore;
|
||||||
|
use rpassword::read_password;
|
||||||
|
let mut secret_store = SecretStore::new();
|
||||||
|
if self.args.cmd_new {
|
||||||
|
println!("Please note that password is NOT RECOVERABLE.");
|
||||||
|
println!("Type password: ");
|
||||||
|
let password = read_password().unwrap();
|
||||||
|
println!("Repeat password: ");
|
||||||
|
let password_repeat = read_password().unwrap();
|
||||||
|
if password != password_repeat {
|
||||||
|
println!("Passwords do not match!");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
println!("New account address:");
|
||||||
|
let new_address = secret_store.new_account(&password).unwrap();
|
||||||
|
println!("{:?}", new_address);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if self.args.cmd_list {
|
||||||
|
println!("Known addresses:");
|
||||||
|
for &(addr, _) in secret_store.accounts().unwrap().iter() {
|
||||||
|
println!("{:?}", addr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn execute_client(&self) {
|
fn execute_client(&self) {
|
||||||
|
// Setup panic handler
|
||||||
|
let panic_handler = PanicHandler::new_in_arc();
|
||||||
|
|
||||||
// Setup logging
|
// Setup logging
|
||||||
setup_log(&self.args.flag_logging);
|
setup_log(&self.args.flag_logging);
|
||||||
// Raise fdlimit
|
// Raise fdlimit
|
||||||
@ -366,6 +406,7 @@ impl Configuration {
|
|||||||
client_config.name = self.args.flag_identity.clone();
|
client_config.name = self.args.flag_identity.clone();
|
||||||
client_config.queue.max_mem_use = self.args.flag_queue_max_size;
|
client_config.queue.max_mem_use = self.args.flag_queue_max_size;
|
||||||
let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
|
let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
|
||||||
|
panic_handler.forward_from(&service);
|
||||||
let client = service.client().clone();
|
let client = service.client().clone();
|
||||||
client.set_author(self.author());
|
client.set_author(self.author());
|
||||||
client.set_extra_data(self.extra_data());
|
client.set_extra_data(self.extra_data());
|
||||||
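Note: the account subcommands added above are driven entirely by the docopt Usage string and the new cmd_* flags. Judging purely from that Usage text (not from separate documentation), invocation looks like:

	parity account new    # prompts twice for a password, prints the new address
	parity account list   # prints the known addresses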
@@ -383,30 +424,36 @@ impl Configuration {
 			let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
 			// TODO: use this as the API list.
 			let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
-			setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect());
+			let server_handler = setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect());
+			if let Some(handler) = server_handler {
+				panic_handler.forward_from(handler.deref());
+			}
+
 		}
 
 		// Register IO handler
 		let io_handler = Arc::new(ClientIoHandler {
 			client: service.client(),
 			info: Default::default(),
-			sync: sync
+			sync: sync.clone(),
 		});
 		service.io().register_handler(io_handler).expect("Error registering IO handler");
 
 		// Handle exit
-		wait_for_exit(&service);
+		wait_for_exit(panic_handler);
 	}
 }
 
-fn wait_for_exit(client_service: &ClientService) {
+fn wait_for_exit(panic_handler: Arc<PanicHandler>) {
 	let exit = Arc::new(Condvar::new());
 
 	// Handle possible exits
 	let e = exit.clone();
 	CtrlC::set_handler(move || { e.notify_all(); });
 
+	// Handle panics
 	let e = exit.clone();
-	client_service.on_panic(move |_reason| { e.notify_all(); });
+	panic_handler.on_panic(move |_reason| { e.notify_all(); });
 
 	// Wait for signal
 	let mutex = Mutex::new(());
@@ -12,8 +12,8 @@ build = "build.rs"
 log = "0.3"
 serde = "0.7.0"
 serde_json = "0.7.0"
-jsonrpc-core = "1.2"
-jsonrpc-http-server = "2.1"
+jsonrpc-core = "2.0"
+jsonrpc-http-server = "3.0"
 ethcore-util = { path = "../util" }
 ethcore = { path = "../ethcore" }
 ethash = { path = "../ethash" }
@@ -26,8 +26,9 @@ serde_macros = { version = "0.7.0", optional = true }
 [build-dependencies]
 serde_codegen = { version = "0.7.0", optional = true }
 syntex = "0.29.0"
+rustc_version = "0.1"
 
 [features]
 default = ["serde_codegen"]
 nightly = ["serde_macros"]
-dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
+dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
rpc/build.rs: 23 changes

@@ -1,3 +1,23 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
+
+use rustc_version::{version_meta, Channel};
+
 #[cfg(not(feature = "serde_macros"))]
 mod inner {
 	extern crate syntex;
@@ -26,4 +46,7 @@ mod inner {
 
 fn main() {
 	inner::main();
+	if let Channel::Nightly = version_meta().channel {
+		println!("cargo:rustc-cfg=nightly");
+	}
 }
@@ -29,33 +29,43 @@ extern crate ethcore;
 extern crate ethsync;
 extern crate transient_hashmap;
 
+use std::sync::Arc;
+use std::thread;
+use util::panics::PanicHandler;
 use self::jsonrpc_core::{IoHandler, IoDelegate};
 
 pub mod v1;
 
 /// Http server.
-pub struct HttpServer {
-	handler: IoHandler,
-	threads: usize
+pub struct RpcServer {
+	handler: Arc<IoHandler>,
 }
 
-impl HttpServer {
+impl RpcServer {
 	/// Construct new http server object with given number of threads.
-	pub fn new(threads: usize) -> HttpServer {
-		HttpServer {
-			handler: IoHandler::new(),
-			threads: threads
+	pub fn new() -> RpcServer {
+		RpcServer {
+			handler: Arc::new(IoHandler::new()),
 		}
 	}
 
 	/// Add io delegate.
-	pub fn add_delegate<D>(&mut self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
+	pub fn add_delegate<D>(&self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
 		self.handler.add_delegate(delegate);
 	}
 
-	/// Start server asynchronously in new thread
-	pub fn start_async(self, addr: &str, cors_domain: &str) {
-		let server = jsonrpc_http_server::Server::new(self.handler, self.threads);
-		server.start_async(addr, jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain.to_owned()))
+	/// Start server asynchronously in new thread and returns panic handler.
+	pub fn start_http(&self, addr: &str, cors_domain: &str, threads: usize) -> Arc<PanicHandler> {
+		let addr = addr.to_owned();
+		let cors_domain = cors_domain.to_owned();
+		let panic_handler = PanicHandler::new_in_arc();
+		let ph = panic_handler.clone();
+		let server = jsonrpc_http_server::Server::new(self.handler.clone());
+		thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || {
+			ph.catch_panic(move || {
+				server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain), threads);
+			}).unwrap()
+		}).expect("Error while creating jsonrpc http thread");
+		panic_handler
 	}
 }
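Note: a rough usage sketch of the refactored server type, pieced together from this hunk and from the setup_rpc_server() change in parity/main.rs earlier in the diff (the url and cors values are placeholders, not values this commit hard-codes):

	// Illustrative only: mirrors how parity/main.rs drives the new RpcServer.
	let server = rpc::RpcServer::new();                    // thread count no longer fixed at construction
	server.add_delegate(Web3Client::new().to_delegate());  // &self is enough now
	// start_http spawns the HTTP thread itself and hands back a PanicHandler
	// that the caller can forward into the application-wide handler.
	let handler = server.start_http(&url, cors, 1);
	panic_handler.forward_from(handler.deref());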
@@ -4,9 +4,13 @@ name = "ethsync"
 version = "0.9.99"
 license = "GPL-3.0"
 authors = ["Ethcore <admin@ethcore.io"]
+build = "build.rs"
 
 [lib]
 
+[build-dependencies]
+rustc_version = "0.1"
+
 [dependencies]
 ethcore-util = { path = "../util" }
 ethcore = { path = "../ethcore" }
@@ -21,4 +25,4 @@ rayon = "0.3.1"
 
 [features]
 default = []
-dev = ["clippy", "ethcore/dev", "ethcore-util/dev"]
+dev = ["ethcore/dev", "ethcore-util/dev"]
sync/build.rs: 25 changes (new file)

@@ -0,0 +1,25 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
+
+use rustc_version::{version_meta, Channel};
+
+fn main() {
+	if let Channel::Nightly = version_meta().channel {
+		println!("cargo:rustc-cfg=nightly");
+	}
+}
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
#[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()`
|
#[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()`
|
||||||
/// Rest sync. Clear all downloaded data but keep the queue
|
/// Rest sync. Clear all downloaded data but keep the queue
|
||||||
fn reset(&mut self) {
|
fn reset(&mut self) {
|
||||||
self.downloading_headers.clear();
|
self.downloading_headers.clear();
|
||||||
@ -342,7 +342,7 @@ impl ChainSync {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
|
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
|
||||||
/// Called by peer once it has new block headers during sync
|
/// Called by peer once it has new block headers during sync
|
||||||
fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
|
fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
|
||||||
self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders);
|
self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders);
|
||||||
@ -469,6 +469,7 @@ impl ChainSync {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Called by peer once it has new block bodies
|
/// Called by peer once it has new block bodies
|
||||||
|
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
|
||||||
fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
|
fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
|
||||||
let block_rlp = try!(r.at(0));
|
let block_rlp = try!(r.at(0));
|
||||||
let header_rlp = try!(block_rlp.at(0));
|
let header_rlp = try!(block_rlp.at(0));
|
||||||
@ -850,8 +851,8 @@ impl ChainSync {
|
|||||||
self.downloading_bodies.remove(&n);
|
self.downloading_bodies.remove(&n);
|
||||||
self.downloading_headers.remove(&n);
|
self.downloading_headers.remove(&n);
|
||||||
}
|
}
|
||||||
self.headers.remove_tail(&start);
|
self.headers.remove_from(&start);
|
||||||
self.bodies.remove_tail(&start);
|
self.bodies.remove_from(&start);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Request headers from a peer by block hash
|
/// Request headers from a peer by block hash
|
||||||
@ -935,7 +936,7 @@ impl ChainSync {
|
|||||||
let mut transaction_queue = self.transaction_queue.lock().unwrap();
|
let mut transaction_queue = self.transaction_queue.lock().unwrap();
|
||||||
for i in 0..item_count {
|
for i in 0..item_count {
|
||||||
let tx: SignedTransaction = try!(r.val_at(i));
|
let tx: SignedTransaction = try!(r.val_at(i));
|
||||||
transaction_queue.add(tx, &fetch_latest_nonce);
|
let _ = transaction_queue.add(tx, &fetch_latest_nonce);
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -1291,7 +1292,7 @@ impl ChainSync {
|
|||||||
let _sender = tx.sender();
|
let _sender = tx.sender();
|
||||||
}
|
}
|
||||||
let mut transaction_queue = self.transaction_queue.lock().unwrap();
|
let mut transaction_queue = self.transaction_queue.lock().unwrap();
|
||||||
transaction_queue.add_all(txs, |a| chain.nonce(a));
|
let _ = transaction_queue.add_all(txs, |a| chain.nonce(a));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -15,11 +15,11 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
#![warn(missing_docs)]
|
#![warn(missing_docs)]
|
||||||
#![cfg_attr(feature="dev", feature(plugin))]
|
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
|
||||||
#![cfg_attr(feature="dev", plugin(clippy))]
|
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
|
||||||
|
|
||||||
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
|
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
|
||||||
#![cfg_attr(feature="dev", allow(clone_on_copy))]
|
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]
|
||||||
|
|
||||||
//! Blockchain sync module
|
//! Blockchain sync module
|
||||||
//! Implements ethereum protocol version 63 as specified here:
|
//! Implements ethereum protocol version 63 as specified here:
|
||||||
|
@ -42,6 +42,8 @@ pub trait RangeCollection<K, V> {
|
|||||||
fn remove_head(&mut self, start: &K);
|
fn remove_head(&mut self, start: &K);
|
||||||
/// Remove all elements >= `start` in the range that contains `start`
|
/// Remove all elements >= `start` in the range that contains `start`
|
||||||
fn remove_tail(&mut self, start: &K);
|
fn remove_tail(&mut self, start: &K);
|
||||||
|
/// Remove all elements >= `start`
|
||||||
|
fn remove_from(&mut self, start: &K);
|
||||||
/// Remove all elements >= `tail`
|
/// Remove all elements >= `tail`
|
||||||
fn insert_item(&mut self, key: K, value: V);
|
fn insert_item(&mut self, key: K, value: V);
|
||||||
/// Get an iterator over ranges
|
/// Get an iterator over ranges
|
||||||
@ -137,6 +139,28 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq +
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Remove the element and all following it.
|
||||||
|
fn remove_from(&mut self, key: &K) {
|
||||||
|
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
|
||||||
|
Ok(index) => { self.drain(.. index + 1); },
|
||||||
|
Err(index) =>{
|
||||||
|
let mut empty = false;
|
||||||
|
match self.get_mut(index) {
|
||||||
|
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
|
||||||
|
v.truncate((*key - *k).to_usize());
|
||||||
|
empty = v.is_empty();
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
if empty {
|
||||||
|
self.drain(.. index + 1);
|
||||||
|
} else {
|
||||||
|
self.drain(.. index);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Remove range elements up to key
|
/// Remove range elements up to key
|
||||||
fn remove_head(&mut self, key: &K) {
|
fn remove_head(&mut self, key: &K) {
|
||||||
if *key == FromUsize::from_usize(0) {
|
if *key == FromUsize::from_usize(0) {
|
||||||
@ -207,7 +231,7 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq +
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
|
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
|
||||||
fn test_range() {
|
fn test_range() {
|
||||||
use std::cmp::{Ordering};
|
use std::cmp::{Ordering};
|
||||||
|
|
||||||
@ -272,5 +296,17 @@ fn test_range() {
|
|||||||
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
|
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
|
||||||
r.remove_tail(&2);
|
r.remove_tail(&2);
|
||||||
assert_eq!(r.range_iter().next(), None);
|
assert_eq!(r.range_iter().next(), None);
|
||||||
|
|
||||||
|
let mut r = ranges.clone();
|
||||||
|
r.remove_from(&20);
|
||||||
|
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
|
||||||
|
r.remove_from(&17);
|
||||||
|
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal);
|
||||||
|
r.remove_from(&15);
|
||||||
|
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
|
||||||
|
r.remove_from(&3);
|
||||||
|
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
|
||||||
|
r.remove_from(&2);
|
||||||
|
assert_eq!(r.range_iter().next(), None);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
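Note: per the doc comments above, remove_tail only trims inside the range that contains the key, while the new remove_from also discards every whole range at or beyond it; that is why the sync code switches to it when dropping downloaded headers and bodies from a start block onward. A quick worked illustration, using the same fixture and sequence as the assertions added to test_range:

	// ranges = [(2, ['b','c','d']), (16, ['p','q','r'])]
	r.remove_from(&17);  // keeps keys below 17: [(2, ['b','c','d']), (16, ['p'])]
	r.remove_from(&15);  // 15 falls in no range, whole (16, ...) entry goes: [(2, ['b','c','d'])]
	r.remove_from(&2);   // removes everything from key 2 on: empty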
@ -85,6 +85,7 @@ use util::numbers::{Uint, U256};
|
|||||||
use util::hash::{Address, H256};
|
use util::hash::{Address, H256};
|
||||||
use util::table::*;
|
use util::table::*;
|
||||||
use ethcore::transaction::*;
|
use ethcore::transaction::*;
|
||||||
|
use ethcore::error::Error;
|
||||||
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
@ -151,10 +152,11 @@ struct VerifiedTransaction {
|
|||||||
transaction: SignedTransaction
|
transaction: SignedTransaction
|
||||||
}
|
}
|
||||||
impl VerifiedTransaction {
|
impl VerifiedTransaction {
|
||||||
fn new(transaction: SignedTransaction) -> Self {
|
fn new(transaction: SignedTransaction) -> Result<Self, Error> {
|
||||||
VerifiedTransaction {
|
try!(transaction.sender());
|
||||||
|
Ok(VerifiedTransaction {
|
||||||
transaction: transaction
|
transaction: transaction
|
||||||
}
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn hash(&self) -> H256 {
|
fn hash(&self) -> H256 {
|
||||||
@ -228,6 +230,8 @@ impl TransactionSet {
 	}
 }

+// Will be used when rpc merged
+#[allow(dead_code)]
 #[derive(Debug)]
 /// Current status of the queue
 pub struct TransactionQueueStatus {
@ -276,6 +280,8 @@ impl TransactionQueue {
 	}
 }

+	// Will be used when rpc merged
+	#[allow(dead_code)]
 	/// Returns current status for this queue
 	pub fn status(&self) -> TransactionQueueStatus {
 		TransactionQueueStatus {
@ -285,17 +291,19 @@ impl TransactionQueue {
 	}

 	/// Adds all signed transactions to queue to be verified and imported
-	pub fn add_all<T>(&mut self, txs: Vec<SignedTransaction>, fetch_nonce: T)
+	pub fn add_all<T>(&mut self, txs: Vec<SignedTransaction>, fetch_nonce: T) -> Result<(), Error>
 		where T: Fn(&Address) -> U256 {
 		for tx in txs.into_iter() {
-			self.add(tx, &fetch_nonce);
+			try!(self.add(tx, &fetch_nonce));
 		}
+		Ok(())
 	}

 	/// Add signed transaction to queue to be verified and imported
-	pub fn add<T>(&mut self, tx: SignedTransaction, fetch_nonce: &T)
+	pub fn add<T>(&mut self, tx: SignedTransaction, fetch_nonce: &T) -> Result<(), Error>
 		where T: Fn(&Address) -> U256 {
-		self.import_tx(VerifiedTransaction::new(tx), fetch_nonce);
+		self.import_tx(try!(VerifiedTransaction::new(tx)), fetch_nonce);
+		Ok(())
 	}

 	/// Removes all transactions identified by hashes given in slice
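Note: with add and add_all both returning Result<(), Error>, the first failing transaction aborts the rest of the batch via try!, while transactions accepted earlier stay queued. A rough, self-contained analogue of that control flow (the queue, error type, and nonce closure below are stand-ins, not the crate's types):

#[derive(Debug)]
struct QueueError;

struct Queue { items: Vec<u64> }

impl Queue {
    fn add<F>(&mut self, item: u64, fetch_nonce: &F) -> Result<(), QueueError>
        where F: Fn(u64) -> u64 {
        // Reject anything below the "current nonce" reported by the closure.
        if item < fetch_nonce(item) { return Err(QueueError); }
        self.items.push(item);
        Ok(())
    }

    fn add_all<F>(&mut self, items: Vec<u64>, fetch_nonce: F) -> Result<(), QueueError>
        where F: Fn(u64) -> u64 {
        for item in items.into_iter() {
            try!(self.add(item, &fetch_nonce)); // first failure stops the batch
        }
        Ok(())
    }
}

fn main() {
    let mut q = Queue { items: vec![] };
    assert!(q.add_all(vec![5, 6], |_| 0).is_ok());
    assert!(q.add_all(vec![1], |_| 10).is_err());
    assert_eq!(q.items, vec![5, 6]); // earlier items remain queued
}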
@ -384,7 +392,8 @@ impl TransactionQueue {
 		self.future.enforce_limit(&mut self.by_hash);
 	}

+	// Will be used when mining merged
+	#[allow(dead_code)]
 	/// Returns top transactions from the queue ordered by priority.
 	pub fn top_transactions(&self, size: usize) -> Vec<SignedTransaction> {
 		self.current.by_priority
@ -506,13 +515,8 @@ impl TransactionQueue {
 #[cfg(test)]
 mod test {
 	extern crate rustc_serialize;
-	use self::rustc_serialize::hex::FromHex;
-	use std::ops::Deref;
-	use std::collections::{HashMap, BTreeSet};
-	use util::crypto::KeyPair;
-	use util::numbers::{U256, Uint};
-	use util::hash::{Address};
 	use util::table::*;
+	use util::*;
 	use ethcore::transaction::*;
 	use super::*;
 	use super::{TransactionSet, TransactionOrder, VerifiedTransaction};
@ -556,12 +560,12 @@ mod test {
 			limit: 1
 		};
 		let (tx1, tx2) = new_txs(U256::from(1));
-		let tx1 = VerifiedTransaction::new(tx1);
-		let tx2 = VerifiedTransaction::new(tx2);
+		let tx1 = VerifiedTransaction::new(tx1).unwrap();
+		let tx2 = VerifiedTransaction::new(tx2).unwrap();
 		let mut by_hash = {
 			let mut x = HashMap::new();
-			let tx1 = VerifiedTransaction::new(tx1.transaction.clone());
-			let tx2 = VerifiedTransaction::new(tx2.transaction.clone());
+			let tx1 = VerifiedTransaction::new(tx1.transaction.clone()).unwrap();
+			let tx2 = VerifiedTransaction::new(tx2.transaction.clone()).unwrap();
 			x.insert(tx1.hash(), tx1);
 			x.insert(tx2.hash(), tx2);
 			x
@ -595,13 +599,39 @@ mod test {
 		let tx = new_tx();

 		// when
-		txq.add(tx, &default_nonce);
+		let res = txq.add(tx, &default_nonce);

 		// then
+		assert!(res.is_ok());
 		let stats = txq.status();
 		assert_eq!(stats.pending, 1);
 	}

+	#[test]
+	fn should_reject_incorectly_signed_transaction() {
+		// given
+		let mut txq = TransactionQueue::new();
+		let tx = new_unsigned_tx(U256::from(123));
+		let stx = {
+			let mut s = RlpStream::new_list(9);
+			s.append(&tx.nonce);
+			s.append(&tx.gas_price);
+			s.append(&tx.gas);
+			s.append_empty_data(); // action=create
+			s.append(&tx.value);
+			s.append(&tx.data);
+			s.append(&0u64); // v
+			s.append(&U256::zero()); // r
+			s.append(&U256::zero()); // s
+			decode(s.as_raw())
+		};
+		// when
+		let res = txq.add(stx, &default_nonce);
+
+		// then
+		assert!(res.is_err());
+	}
+
 	#[test]
 	fn should_import_txs_from_same_sender() {
 		// given
@ -610,8 +640,8 @@ mod test {
 		let (tx, tx2) = new_txs(U256::from(1));

 		// when
-		txq.add(tx.clone(), &default_nonce);
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx.clone(), &default_nonce).unwrap();
+		txq.add(tx2.clone(), &default_nonce).unwrap();

 		// then
 		let top = txq.top_transactions(5);
@ -628,8 +658,8 @@ mod test {
 		let (tx, tx2) = new_txs(U256::from(2));

 		// when
-		txq.add(tx.clone(), &default_nonce);
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx.clone(), &default_nonce).unwrap();
+		txq.add(tx2.clone(), &default_nonce).unwrap();

 		// then
 		let stats = txq.status();
@ -650,13 +680,13 @@ mod test {
 		let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret);
 		let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret);

-		txq.add(tx, &default_nonce);
+		txq.add(tx, &default_nonce).unwrap();
 		assert_eq!(txq.status().pending, 1);
-		txq.add(tx2, &default_nonce);
+		txq.add(tx2, &default_nonce).unwrap();
 		assert_eq!(txq.status().future, 1);

 		// when
-		txq.add(tx1, &default_nonce);
+		txq.add(tx1, &default_nonce).unwrap();

 		// then
 		let stats = txq.status();
@ -669,8 +699,8 @@ mod test {
 		// given
 		let mut txq2 = TransactionQueue::new();
 		let (tx, tx2) = new_txs(U256::from(3));
-		txq2.add(tx.clone(), &default_nonce);
-		txq2.add(tx2.clone(), &default_nonce);
+		txq2.add(tx.clone(), &default_nonce).unwrap();
+		txq2.add(tx2.clone(), &default_nonce).unwrap();
 		assert_eq!(txq2.status().pending, 1);
 		assert_eq!(txq2.status().future, 1);

@ -691,10 +721,10 @@ mod test {
 		let mut txq = TransactionQueue::new();
 		let (tx, tx2) = new_txs(U256::from(1));
 		let tx3 = new_tx();
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx2.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().future, 1);
-		txq.add(tx3.clone(), &default_nonce);
-		txq.add(tx.clone(), &default_nonce);
+		txq.add(tx3.clone(), &default_nonce).unwrap();
+		txq.add(tx.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().pending, 3);

 		// when
@ -713,8 +743,8 @@ mod test {
 		let (tx, tx2) = new_txs(U256::one());

 		// add
-		txq.add(tx2.clone(), &default_nonce);
-		txq.add(tx.clone(), &default_nonce);
+		txq.add(tx2.clone(), &default_nonce).unwrap();
+		txq.add(tx.clone(), &default_nonce).unwrap();
 		let stats = txq.status();
 		assert_eq!(stats.pending, 2);

@ -731,11 +761,11 @@ mod test {
 		// given
 		let mut txq = TransactionQueue::with_limits(1, 1);
 		let (tx, tx2) = new_txs(U256::one());
-		txq.add(tx.clone(), &default_nonce);
+		txq.add(tx.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().pending, 1);

 		// when
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx2.clone(), &default_nonce).unwrap();

 		// then
 		let t = txq.top_transactions(2);
@ -749,14 +779,14 @@ mod test {
 		let mut txq = TransactionQueue::with_limits(10, 1);
 		let (tx1, tx2) = new_txs(U256::from(4));
 		let (tx3, tx4) = new_txs(U256::from(4));
-		txq.add(tx1.clone(), &default_nonce);
-		txq.add(tx3.clone(), &default_nonce);
+		txq.add(tx1.clone(), &default_nonce).unwrap();
+		txq.add(tx3.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().pending, 2);

 		// when
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx2.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().future, 1);
-		txq.add(tx4.clone(), &default_nonce);
+		txq.add(tx4.clone(), &default_nonce).unwrap();

 		// then
 		assert_eq!(txq.status().future, 1);
@ -770,7 +800,7 @@ mod test {
 		let fetch_last_nonce = |_a: &Address| last_nonce;

 		// when
-		txq.add(tx, &fetch_last_nonce);
+		txq.add(tx, &fetch_last_nonce).unwrap();

 		// then
 		let stats = txq.status();
@ -784,12 +814,12 @@ mod test {
 		let nonce = |a: &Address| default_nonce(a) + U256::one();
 		let mut txq = TransactionQueue::new();
 		let (_tx1, tx2) = new_txs(U256::from(1));
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx2.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().future, 1);
 		assert_eq!(txq.status().pending, 0);

 		// when
-		txq.add(tx2.clone(), &nonce);
+		txq.add(tx2.clone(), &nonce).unwrap();

 		// then
 		let stats = txq.status();
@ -802,15 +832,15 @@ mod test {
 		// given
 		let mut txq = TransactionQueue::new();
 		let (tx1, tx2) = new_txs(U256::from(1));
-		txq.add(tx1.clone(), &default_nonce);
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx1.clone(), &default_nonce).unwrap();
+		txq.add(tx2.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().pending, 2);

 		// when
 		txq.remove(&tx1.hash(), &default_nonce);
 		assert_eq!(txq.status().pending, 0);
 		assert_eq!(txq.status().future, 1);
-		txq.add(tx1.clone(), &default_nonce);
+		txq.add(tx1.clone(), &default_nonce).unwrap();

 		// then
 		let stats = txq.status();
@ -825,10 +855,10 @@ mod test {
 		let mut txq = TransactionQueue::new();
 		let (tx, tx2) = new_txs(U256::from(1));
 		let tx3 = new_tx();
-		txq.add(tx2.clone(), &default_nonce);
+		txq.add(tx2.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().future, 1);
-		txq.add(tx3.clone(), &default_nonce);
-		txq.add(tx.clone(), &default_nonce);
+		txq.add(tx3.clone(), &default_nonce).unwrap();
+		txq.add(tx.clone(), &default_nonce).unwrap();
 		assert_eq!(txq.status().pending, 3);

 		// when
@ -853,8 +883,8 @@ mod test {
 		};

 		// when
-		txq.add(tx, &default_nonce);
-		txq.add(tx2, &default_nonce);
+		txq.add(tx, &default_nonce).unwrap();
+		txq.add(tx2, &default_nonce).unwrap();

 		// then
 		let stats = txq.status();
@ -881,10 +911,10 @@ mod test {
 		};

 		// when
-		txq.add(tx1, &default_nonce);
-		txq.add(tx2, &default_nonce);
+		txq.add(tx1, &default_nonce).unwrap();
+		txq.add(tx2, &default_nonce).unwrap();
 		assert_eq!(txq.status().future, 1);
-		txq.add(tx0, &default_nonce);
+		txq.add(tx0, &default_nonce).unwrap();

 		// then
 		let stats = txq.status();
@ -900,8 +930,8 @@ mod test {
 		let next_nonce = |a: &Address| default_nonce(a) + U256::one();
 		let mut txq = TransactionQueue::new();
 		let (tx1, tx2) = new_txs(U256::one());
-		txq.add(tx1.clone(), &previous_nonce);
-		txq.add(tx2, &previous_nonce);
+		txq.add(tx1.clone(), &previous_nonce).unwrap();
+		txq.add(tx2, &previous_nonce).unwrap();
 		assert_eq!(txq.status().future, 2);

 		// when
@ -40,7 +40,7 @@ chrono = "0.2"

 [features]
 default = []
-dev = ["clippy"]
+dev = []

 [build-dependencies]
 vergen = "*"
@ -1103,7 +1103,7 @@ macro_rules! construct_uint {
 		}
 	}

-	#[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok.
+	#[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok.
 	impl Hash for $name {
 		fn hash<H>(&self, state: &mut H) where H: Hasher {
 			unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); }
@ -1485,7 +1485,7 @@ mod tests {
 	}

 	#[test]
-	#[cfg_attr(feature="dev", allow(eq_op))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))]
 	pub fn uint256_comp_test() {
 		let small = U256([10u64, 0, 0, 0]);
 		let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]);
@ -2032,7 +2032,7 @@ mod tests {


 	#[test]
-	#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
 	fn u256_multi_full_mul() {
 		let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0]));
 		assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result);
@ -1,7 +1,28 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
 extern crate vergen;

 use vergen::*;
+use rustc_version::{version_meta, Channel};

 fn main() {
 	vergen(OutputFns::all()).unwrap();
+	if let Channel::Nightly = version_meta().channel {
+		println!("cargo:rustc-cfg=nightly");
+	}
 }
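Note: once this build script prints cargo:rustc-cfg=nightly, any item in the crate can be gated on that cfg, which is what the all(nightly, feature="dev") attributes elsewhere in this change rely on. A minimal, stand-alone illustration (the function below is hypothetical; only the cfg name comes from the build script above):

#[cfg(nightly)]
fn toolchain() -> &'static str { "nightly" }

#[cfg(not(nightly))]
fn toolchain() -> &'static str { "stable or beta" }

fn main() {
    // Prints "nightly" only when the build script emitted the nightly cfg.
    println!("compiled on: {}", toolchain());
}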
@ -305,7 +305,7 @@ macro_rules! impl_hash {
 		}

 		impl Copy for $from {}
-		#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
+		#[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))]
 		impl Clone for $from {
 			fn clone(&self) -> $from {
 				unsafe {
@ -637,7 +637,7 @@ mod tests {
 	use std::str::FromStr;

 	#[test]
-	#[cfg_attr(feature="dev", allow(eq_op))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))]
 	fn hash() {
 		let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
 		assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
@ -158,7 +158,7 @@ impl JournalDB {
 		backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
 	}

-	fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
+	fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
 		for &(ref h, ref d) in inserts {
 			if let Some(c) = counters.get_mut(h) {
 				// already counting. increment.
@ -181,7 +181,7 @@ impl JournalDB {
 		}
 	}

-	fn replay_keys(inserts: &Vec<H256>, backing: &Database, counters: &mut HashMap<H256, i32>) {
+	fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap<H256, i32>) {
 		trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters);
 		for h in inserts {
 			if let Some(c) = counters.get_mut(h) {
@ -211,12 +211,12 @@ impl JournalDB {
 				n = Some(*c);
 			}
 		}
-		match &n {
-			&Some(i) if i == 1 => {
+		match n {
+			Some(i) if i == 1 => {
 				counters.remove(&h);
 				Self::reset_already_in(batch, &h);
 			}
-			&None => {
+			None => {
 				// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
 				//assert!(!Self::is_already_in(db, &h));
 				batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?");
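Note: the match cleanup above drops the extra borrow so the arms can be written without &Some(..)/&None patterns. A small, self-contained illustration of the same shape (the values and strings below are stand-ins, not the journal's semantics):

fn describe(n: Option<i32>) -> &'static str {
    // Matching the Option by value: no reference patterns needed.
    match n {
        Some(1) => "last reference",
        Some(_) => "still referenced",
        None => "not journalled",
    }
}

fn main() {
    assert_eq!(describe(Some(1)), "last reference");
    assert_eq!(describe(None), "not journalled");
}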
@ -295,7 +295,7 @@ impl JournalDB {
 		let drained = self.overlay.drain();
 		let removes: Vec<H256> = drained
 			.iter()
-			.filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned()
+			.filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None})
 			.collect();
 		let inserts: Vec<(H256, Bytes)> = drained
 			.into_iter()
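Note: the iterator cleanup above moves the clone into the filter_map closure and drops the trailing .cloned(). A self-contained sketch of the resulting shape with plain std types (the overlay/drain types are not reproduced):

use std::collections::HashMap;

// Collect the keys whose counter went negative, cloning each key exactly once.
fn negative_keys(counts: &HashMap<String, i32>) -> Vec<String> {
    counts.iter()
        .filter_map(|(k, &c)| if c < 0 { Some(k.clone()) } else { None })
        .collect()
}

fn main() {
    let mut counts = HashMap::new();
    counts.insert("removed".to_string(), -1);
    counts.insert("kept".to_string(), 2);
    assert_eq!(negative_keys(&counts), vec!["removed".to_string()]);
}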
@ -382,7 +382,10 @@ impl JournalDB {

 	/// Returns heap memory size used
 	pub fn mem_used(&self) -> usize {
-		self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 }
+		self.overlay.mem_used() + match self.counters {
+			Some(ref c) => c.read().unwrap().heap_size_of_children(),
+			None => 0
+		}
 	}
 }

@ -614,7 +617,6 @@ mod tests {
	fn reopen_remove() {
 		let mut dir = ::std::env::temp_dir();
 		dir.push(H32::random().hex());
-		let bar = H256::random();

 		let foo = {
 			let mut jdb = JournalDB::new(dir.to_str().unwrap());
@ -84,6 +84,7 @@ impl SecretStore {
 		let mut path = ::std::env::home_dir().expect("Failed to get home dir");
 		path.push(".parity");
 		path.push("keys");
+		::std::fs::create_dir_all(&path).expect("Should panic since it is critical to be able to access home dir");
 		Self::new_in(&path)
 	}

@ -55,8 +55,7 @@ pub struct DatabaseIterator<'a> {
 impl<'a> Iterator for DatabaseIterator<'a> {
 	type Item = (Box<[u8]>, Box<[u8]>);

-	#[cfg_attr(feature="dev", allow(type_complexity))]
-	fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> {
+	fn next(&mut self) -> Option<Self::Item> {
 		self.iter.next()
 	}
 }
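Note: returning Option<Self::Item> instead of restating the tuple type keeps the signature tied to the associated type and makes the clippy type_complexity allowance unnecessary. A minimal stand-alone example of the same pattern (the Counter type is hypothetical):

struct Counter { n: u8 }

impl Iterator for Counter {
    type Item = (u8, u8);

    // Referring back to the associated type keeps the signature in sync
    // if Item ever changes.
    fn next(&mut self) -> Option<Self::Item> {
        if self.n < 3 {
            self.n += 1;
            Some((self.n, self.n * 2))
        } else {
            None
        }
    }
}

fn main() {
    let pairs: Vec<_> = Counter { n: 0 }.collect();
    assert_eq!(pairs, vec![(1, 2), (2, 4), (3, 6)]);
}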
@ -15,18 +15,18 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 #![warn(missing_docs)]
-#![cfg_attr(feature="dev", feature(plugin))]
-#![cfg_attr(feature="dev", plugin(clippy))]
+#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
+#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]

 // Clippy settings
 // TODO [todr] not really sure
-#![cfg_attr(feature="dev", allow(needless_range_loop))]
+#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
 // Shorter than if-else
-#![cfg_attr(feature="dev", allow(match_bool))]
+#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
 // We use that to be more explicit about handled cases
-#![cfg_attr(feature="dev", allow(match_same_arms))]
+#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))]
 // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
-#![cfg_attr(feature="dev", allow(clone_on_copy))]
+#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]

 //! Ethcore-util library
 //!
@ -243,7 +243,7 @@ impl Discovery {
 		self.send_to(packet, address.clone());
 	}

-	#[cfg_attr(feature="dev", allow(map_clone))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(map_clone))]
 	fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec<NodeEntry> {
 		let mut found: BTreeMap<u32, Vec<&NodeEntry>> = BTreeMap::new();
 		let mut count = 0;
@ -507,7 +507,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count());
 	}

-	#[cfg_attr(feature="dev", allow(single_match))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
 	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
 		if self.have_session(id)
 		{
@ -542,7 +542,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		self.create_connection(socket, Some(id), io);
 	}

-	#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))]
 	fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) {
 		let nonce = self.info.write().unwrap().next_nonce();
 		let mut handshakes = self.handshakes.write().unwrap();
@ -71,7 +71,7 @@ impl PanicHandler {

 	/// Invoke closure and catch any possible panics.
 	/// In case of panic notifies all listeners about it.
-	#[cfg_attr(feature="dev", allow(deprecated))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(deprecated))]
 	pub fn catch_panic<G, R>(&self, g: G) -> thread::Result<R> where G: FnOnce() -> R + Send + 'static {
 		let _guard = PanicGuard { handler: self };
 		let result = g();
@ -54,7 +54,7 @@ pub struct TrieDB<'db> {
 	pub hash_count: usize,
 }

-#[cfg_attr(feature="dev", allow(wrong_self_convention))]
+#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
 impl<'db> TrieDB<'db> {
 	/// Create a new trie with the backing database `db` and `root`
 	/// Panics, if `root` does not exist
@ -66,7 +66,7 @@ enum MaybeChanged<'a> {
 	Changed(Bytes),
 }

-#[cfg_attr(feature="dev", allow(wrong_self_convention))]
+#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
 impl<'db> TrieDBMut<'db> {
 	/// Create a new trie with the backing database `db` and empty `root`
 	/// Initialise to the state entailed by the genesis block.
@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> {
 		}
 	}

-	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
 	/// Determine the RLP of the node, assuming we're inserting `partial` into the
 	/// node currently of data `old`. This will *not* delete any hash of `old` from the database;
 	/// it will just return the new RLP that includes the new node.