commit 3b21a5f54c

Merge branch 'master' into rpc-signing-extend

Conflicts:
	parity/main.rs
Cargo.lock (generated, 34 lines changed)
@@ -15,7 +15,9 @@ dependencies = [
  "fdlimit 0.1.0",
  "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rpassword 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -217,6 +219,7 @@ dependencies = [
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -236,10 +239,11 @@ dependencies = [
  "ethcore 0.9.99",
  "ethcore-util 0.9.99",
  "ethsync 0.9.99",
- "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -295,6 +299,7 @@ dependencies = [
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -409,7 +414,7 @@ dependencies = [

 [[package]]
 name = "jsonrpc-core"
-version = "1.2.0"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -420,11 +425,11 @@ dependencies = [

 [[package]]
 name = "jsonrpc-http-server"
-version = "2.1.0"
+version = "3.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "hyper 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -699,6 +704,17 @@ dependencies = [
  "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)",
 ]

+[[package]]
+name = "rpassword"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "rust-crypto"
 version = "0.2.34"

@@ -832,6 +848,14 @@ dependencies = [
  "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "termios"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "time"
 version = "0.1.34"
Cargo.toml (28 lines changed)
@@ -4,6 +4,10 @@ name = "parity"
 version = "0.9.99"
 license = "GPL-3.0"
 authors = ["Ethcore <admin@ethcore.io>"]
+build = "build.rs"
+
+[build-dependencies]
+rustc_version = "0.1"

 [dependencies]
 log = "0.3"

@@ -12,22 +16,30 @@ rustc-serialize = "0.3"
 docopt = "0.6"
 time = "0.1"
 ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" }
-clippy = { version = "0.0.44", optional = true }
-ethcore-util = { path = "util" }
-ethcore = { path = "ethcore" }
-ethsync = { path = "sync" }
-ethcore-rpc = { path = "rpc", optional = true }
 fdlimit = { path = "util/fdlimit" }
 daemonize = "0.2"
-ethcore-devtools = { path = "devtools" }
 number_prefix = "0.2"
+clippy = { version = "0.0.44", optional = true }
+ethcore = { path = "ethcore" }
+ethcore-util = { path = "util" }
+ethsync = { path = "sync" }
+ethcore-devtools = { path = "devtools" }
+ethcore-rpc = { path = "rpc", optional = true }
+rpassword = "0.1"

 [dev-dependencies]
 ethcore = { path = "ethcore", features = ["dev"] }
 ethcore-util = { path = "util", features = ["dev"] }
 ethsync = { path = "sync", features = ["dev"] }
 ethcore-rpc = { path = "rpc", features = ["dev"] }

 [features]
 default = ["rpc"]
 rpc = ["ethcore-rpc"]
-dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
+dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
+dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"]
 travis-beta = ["ethcore/json-tests"]
-travis-nightly = ["ethcore/json-tests", "dev"]
+travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"]

 [[bin]]
 path = "parity/main.rs"
build.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
+
+use rustc_version::{version_meta, Channel};
+
+fn main() {
+    if let Channel::Nightly = version_meta().channel {
+        println!("cargo:rustc-cfg=nightly");
+    }
+}
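
The build script's only job is to emit cargo:rustc-cfg=nightly when the toolchain channel is Nightly; the hunks below then switch every clippy attribute from cfg_attr(feature="dev", ...) to cfg_attr(all(nightly, feature="dev"), ...), so clippy is only requested when both the nightly compiler and the dev feature are present. A minimal sketch of how the emitted cfg and the cargo feature combine (the attribute shape is taken from the hunks; the enum is hypothetical):

// Lint plugins only load when BOTH hold: the build script saw a nightly
// compiler (cfg "nightly") and the crate was built with --features dev.
#![cfg_attr(all(nightly, feature = "dev"), feature(plugin))]
#![cfg_attr(all(nightly, feature = "dev"), plugin(clippy))]

// On stable, the attribute vanishes and this compiles untouched.
// (Hypothetical enum, for illustration only.)
#[cfg_attr(all(nightly, feature = "dev"), allow(enum_variant_names))]
pub enum DemoVariant {
    DemoOne,
    DemoTwo,
}

fn main() {
    let _ = (DemoVariant::DemoOne, DemoVariant::DemoTwo);
}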
ethcore/Cargo.toml

@@ -5,6 +5,10 @@ license = "GPL-3.0"
 name = "ethcore"
 version = "0.9.99"
 authors = ["Ethcore <admin@ethcore.io>"]
+build = "build.rs"
+
+[build-dependencies]
+rustc_version = "0.1"

 [dependencies]
 log = "0.3"

@@ -27,5 +31,5 @@ jit = ["evmjit"]
 evm-debug = []
 json-tests = []
 test-heavy = []
-dev = ["clippy"]
+dev = []
 default = []
ethcore/build.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
+
+use rustc_version::{version_meta, Channel};
+
+fn main() {
+    if let Channel::Nightly = version_meta().channel {
+        println!("cargo:rustc-cfg=nightly");
+    }
+}
ethcore/src/basic_types.rs

@@ -24,7 +24,7 @@ pub type LogBloom = H2048;
 /// Constant 2048-bit datum for 0. Often used as a default.
 pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]);

-#[cfg_attr(feature="dev", allow(enum_variant_names))]
+#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
 /// Semantic boolean for when a seal/signature is included.
 pub enum Seal {
     /// The seal/signature is included.
ethcore/src/block.rs

@@ -16,7 +16,7 @@

 //! Blockchain block.

-#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
+#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>

 use common::*;
 use engine::*;

@@ -274,7 +274,7 @@ impl<'x> OpenBlock<'x> {
 s.block.base.header.note_dirty();

 ClosedBlock {
-    block: s.block,
+    block: s.block,
     uncle_bytes: uncle_bytes,
 }
 }
ethcore/src/block_queue.rs

@@ -121,7 +121,7 @@ struct QueueSignal {
 }

 impl QueueSignal {
-    #[cfg_attr(feature="dev", allow(bool_comparison))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))]
     fn set(&self) {
         if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
             self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");

@@ -320,6 +320,9 @@ impl BlockQueue {

     /// Mark given block and all its children as bad. Stops verification.
     pub fn mark_as_bad(&mut self, block_hashes: &[H256]) {
+        if block_hashes.is_empty() {
+            return;
+        }
         let mut verification_lock = self.verification.lock().unwrap();
         let mut processing = self.processing.write().unwrap();

@@ -345,6 +348,9 @@ impl BlockQueue {

     /// Mark given block as processed
     pub fn mark_as_good(&mut self, block_hashes: &[H256]) {
+        if block_hashes.is_empty() {
+            return;
+        }
         let mut processing = self.processing.write().unwrap();
         for hash in block_hashes {
             processing.remove(&hash);

@@ -385,7 +391,7 @@ impl BlockQueue {
         }
     }

-    pub fn collect_garbage(&self) {
+    pub fn collect_garbage(&self) {
         {
             let mut verification = self.verification.lock().unwrap();
             verification.unverified.shrink_to_fit();
ethcore/src/blockchain/block_info.rs

@@ -18,6 +18,7 @@ use util::numbers::{U256,H256};
 use header::BlockNumber;

 /// Brief info about inserted block.
+#[derive(Clone)]
 pub struct BlockInfo {
     /// Block hash.
     pub hash: H256,

@@ -30,6 +31,7 @@ pub struct BlockInfo {
 }

 /// Describes location of newly inserted block.
+#[derive(Clone)]
 pub enum BlockLocation {
     /// It's part of the canon chain.
     CanonChain,

@@ -42,6 +44,8 @@ pub enum BlockLocation {
         /// Hash of the newest common ancestor with old canon chain.
         ancestor: H256,
         /// Hashes of the blocks between ancestor and this block.
-        route: Vec<H256>
+        enacted: Vec<H256>,
+        /// Hashes of the blocks which were invalidated.
+        retracted: Vec<H256>,
     }
 }
ethcore/src/blockchain/blockchain.rs

@@ -28,7 +28,7 @@ use blockchain::best_block::BestBlock;
 use blockchain::bloom_indexer::BloomIndexer;
 use blockchain::tree_route::TreeRoute;
 use blockchain::update::ExtrasUpdate;
-use blockchain::CacheSize;
+use blockchain::{CacheSize, ImportRoute};

 const BLOOM_INDEX_SIZE: usize = 16;
 const BLOOM_LEVELS: u8 = 3;

@@ -414,14 +414,14 @@ impl BlockChain {
     /// Inserts the block into backing cache database.
     /// Expects the block to be valid and already verified.
     /// If the block is already known, does nothing.
-    pub fn insert_block(&self, bytes: &[u8], receipts: Vec<Receipt>) {
+    pub fn insert_block(&self, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
         // create views onto rlp
         let block = BlockView::new(bytes);
         let header = block.header_view();
         let hash = header.sha3();

         if self.is_known(&hash) {
-            return;
+            return ImportRoute::none();
         }

         // store block in db

@@ -435,8 +435,10 @@ impl BlockChain {
             block_receipts: self.prepare_block_receipts_update(receipts, &info),
             transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
             blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
-            info: info
+            info: info.clone(),
         });

+        ImportRoute::from(info)
     }

     /// Applies extras update.

@@ -549,9 +551,14 @@ impl BlockChain {

         match route.blocks.len() {
             0 => BlockLocation::CanonChain,
-            _ => BlockLocation::BranchBecomingCanonChain {
-                ancestor: route.ancestor,
-                route: route.blocks.into_iter().skip(route.index).collect()
+            _ => {
+                let retracted = route.blocks.iter().take(route.index).cloned().collect::<Vec<H256>>();
+
+                BlockLocation::BranchBecomingCanonChain {
+                    ancestor: route.ancestor,
+                    enacted: route.blocks.into_iter().skip(route.index).collect(),
+                    retracted: retracted.into_iter().rev().collect(),
+                }
+            }
         }
     } else {

@@ -572,11 +579,11 @@ impl BlockChain {
             BlockLocation::CanonChain => {
                 block_hashes.insert(number, info.hash.clone());
             },
-            BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => {
+            BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. } => {
                 let ancestor_number = self.block_number(ancestor).unwrap();
                 let start_number = ancestor_number + 1;

-                for (index, hash) in route.iter().cloned().enumerate() {
+                for (index, hash) in enacted.iter().cloned().enumerate() {
                     block_hashes.insert(start_number + index as BlockNumber, hash);
                 }

@@ -661,11 +668,11 @@ impl BlockChain {
                 ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels())
                     .add_bloom(&header.log_bloom(), header.number() as usize)
             },
-            BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => {
+            BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. } => {
                 let ancestor_number = self.block_number(ancestor).unwrap();
                 let start_number = ancestor_number + 1;

-                let mut blooms: Vec<H2048> = route.iter()
+                let mut blooms: Vec<H2048> = enacted.iter()
                     .map(|hash| self.block(hash).unwrap())
                     .map(|bytes| BlockView::new(&bytes).header_view().log_bloom())
                     .collect();

@@ -825,7 +832,7 @@ mod tests {
     use rustc_serialize::hex::FromHex;
     use util::hash::*;
     use util::sha3::Hashable;
-    use blockchain::{BlockProvider, BlockChain, BlockChainConfig};
+    use blockchain::{BlockProvider, BlockChain, BlockChainConfig, ImportRoute};
     use tests::helpers::*;
     use devtools::*;
     use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};

@@ -884,7 +891,7 @@ mod tests {
     }

     #[test]
-    #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
     fn test_find_uncles() {
         let mut canon_chain = ChainGenerator::default();
         let mut finalizer = BlockFinalizer::default();

@@ -922,7 +929,7 @@ mod tests {
     }

     #[test]
-    #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
     fn test_small_fork() {
         let mut canon_chain = ChainGenerator::default();
         let mut finalizer = BlockFinalizer::default();

@@ -943,10 +950,30 @@ mod tests {

     let temp = RandomTempPath::new();
     let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
-    bc.insert_block(&b1, vec![]);
-    bc.insert_block(&b2, vec![]);
-    bc.insert_block(&b3a, vec![]);
-    bc.insert_block(&b3b, vec![]);
+    let ir1 = bc.insert_block(&b1, vec![]);
+    let ir2 = bc.insert_block(&b2, vec![]);
+    let ir3b = bc.insert_block(&b3b, vec![]);
+    let ir3a = bc.insert_block(&b3a, vec![]);
+
+    assert_eq!(ir1, ImportRoute {
+        enacted: vec![b1_hash],
+        retracted: vec![],
+    });
+
+    assert_eq!(ir2, ImportRoute {
+        enacted: vec![b2_hash],
+        retracted: vec![],
+    });
+
+    assert_eq!(ir3b, ImportRoute {
+        enacted: vec![b3b_hash],
+        retracted: vec![],
+    });
+
+    assert_eq!(ir3a, ImportRoute {
+        enacted: vec![b3a_hash],
+        retracted: vec![b3b_hash],
+    });

     assert_eq!(bc.best_block_hash(), best_block_hash);
     assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0);
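
The heart of the BranchBecomingCanonChain change above: the tree route's block list is split around route.index, with everything before the index leaving the canonical chain (reported newest-first, hence the rev()) and everything from the index on becoming canonical. A self-contained sketch of that split, with H256 stubbed as an integer:

// Standalone sketch of the enacted/retracted split performed above; Hash
// stands in for util::hash::H256, and `index` marks the first block that
// belongs to the new branch, exactly as in TreeRoute.
type Hash = u64;

struct TreeRoute {
    blocks: Vec<Hash>, // old-chain blocks first, then new-branch blocks
    index: usize,
}

fn split(route: TreeRoute) -> (Vec<Hash>, Vec<Hash>) {
    // Blocks before `index` drop out of the canonical chain; the diff
    // reverses them so the newest retracted block comes first.
    let mut retracted: Vec<Hash> = route.blocks.iter().take(route.index).cloned().collect();
    retracted.reverse();
    // Blocks from `index` onwards become the new canonical chain.
    let enacted: Vec<Hash> = route.blocks.into_iter().skip(route.index).collect();
    (enacted, retracted)
}

fn main() {
    let route = TreeRoute { blocks: vec![1, 2, 3, 4, 5], index: 2 };
    let (enacted, retracted) = split(route);
    assert_eq!(enacted, vec![3, 4, 5]);
    assert_eq!(retracted, vec![2, 1]); // old blocks, newest first
}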
ethcore/src/blockchain/generator/chain_iterator.rs

@@ -29,7 +29,7 @@ pub trait ChainIterator: Iterator + Sized {
     /// Blocks generated by fork will have lower difficulty than current chain.
     fn fork(&self, fork_number: usize) -> Fork<Self> where Self: Clone;
     /// Should be called to make every consecutive block have given bloom.
-    fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self>;
+    fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self>;
     /// Should be called to complete block. Without complete, block may have incorrect hash.
     fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>;
     /// Completes and generates block.

@@ -44,7 +44,7 @@ impl<I> ChainIterator for I where I: Iterator + Sized {
     }
 }

-    fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self> {
+    fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self> {
         Bloom {
             iter: self,
             bloom: bloom
ethcore/src/blockchain/import_route.rs (new file, 119 lines)
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Import route.
|
||||
|
||||
use util::hash::H256;
|
||||
use blockchain::block_info::{BlockInfo, BlockLocation};
|
||||
|
||||
/// Import route for newly inserted block.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct ImportRoute {
|
||||
/// Blocks that were invalidated by new block.
|
||||
pub retracted: Vec<H256>,
|
||||
/// Blocks that were validated by new block.
|
||||
pub enacted: Vec<H256>,
|
||||
}
|
||||
|
||||
impl ImportRoute {
|
||||
pub fn none() -> Self {
|
||||
ImportRoute {
|
||||
retracted: vec![],
|
||||
enacted: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BlockInfo> for ImportRoute {
|
||||
fn from(info: BlockInfo) -> ImportRoute {
|
||||
match info.location {
|
||||
BlockLocation::CanonChain => ImportRoute {
|
||||
retracted: vec![],
|
||||
enacted: vec![info.hash],
|
||||
},
|
||||
BlockLocation::Branch => ImportRoute::none(),
|
||||
BlockLocation::BranchBecomingCanonChain { mut enacted, retracted, .. } => {
|
||||
enacted.push(info.hash);
|
||||
ImportRoute {
|
||||
retracted: retracted,
|
||||
enacted: enacted,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use util::hash::H256;
|
||||
use util::numbers::U256;
|
||||
use blockchain::block_info::{BlockInfo, BlockLocation};
|
||||
use blockchain::ImportRoute;
|
||||
|
||||
#[test]
|
||||
fn import_route_none() {
|
||||
assert_eq!(ImportRoute::none(), ImportRoute {
|
||||
enacted: vec![],
|
||||
retracted: vec![],
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn import_route_branch() {
|
||||
let info = BlockInfo {
|
||||
hash: H256::from(U256::from(1)),
|
||||
number: 0,
|
||||
total_difficulty: U256::from(0),
|
||||
location: BlockLocation::Branch,
|
||||
};
|
||||
|
||||
assert_eq!(ImportRoute::from(info), ImportRoute::none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn import_route_canon_chain() {
|
||||
let info = BlockInfo {
|
||||
hash: H256::from(U256::from(1)),
|
||||
number: 0,
|
||||
total_difficulty: U256::from(0),
|
||||
location: BlockLocation::CanonChain,
|
||||
};
|
||||
|
||||
assert_eq!(ImportRoute::from(info), ImportRoute {
|
||||
retracted: vec![],
|
||||
enacted: vec![H256::from(U256::from(1))],
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn import_route_branch_becoming_canon_chain() {
|
||||
let info = BlockInfo {
|
||||
hash: H256::from(U256::from(2)),
|
||||
number: 0,
|
||||
total_difficulty: U256::from(0),
|
||||
location: BlockLocation::BranchBecomingCanonChain {
|
||||
ancestor: H256::from(U256::from(0)),
|
||||
enacted: vec![H256::from(U256::from(1))],
|
||||
retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))],
|
||||
}
|
||||
};
|
||||
|
||||
assert_eq!(ImportRoute::from(info), ImportRoute {
|
||||
retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))],
|
||||
enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))],
|
||||
});
|
||||
}
|
||||
}
|
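
Since insert_block now hands back an ImportRoute, callers learn immediately which hashes entered and left the canonical chain instead of recomputing it. A sketch of a hypothetical consumer (the struct mirrors the file above with H256 stubbed; on_import is not part of the codebase):

type H256 = u64; // stub for util::hash::H256

#[derive(Debug, PartialEq)]
pub struct ImportRoute {
    pub retracted: Vec<H256>,
    pub enacted: Vec<H256>,
}

impl ImportRoute {
    pub fn none() -> Self {
        ImportRoute { retracted: vec![], enacted: vec![] }
    }
}

// Hypothetical consumer: react to the outcome of a block import.
fn on_import(route: &ImportRoute) {
    if route == &ImportRoute::none() {
        return; // block already known, or landed on a non-canonical branch
    }
    for hash in &route.retracted {
        println!("dropped from canon chain: {}", hash);
    }
    for hash in &route.enacted {
        println!("now on canon chain: {}", hash);
    }
}

fn main() {
    on_import(&ImportRoute { retracted: vec![3], enacted: vec![4, 5] });
}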
ethcore/src/blockchain/mod.rs

@@ -25,7 +25,9 @@ mod tree_route;
 mod update;
 #[cfg(test)]
 mod generator;
+mod import_route;

 pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig};
 pub use self::cache::CacheSize;
 pub use self::tree_route::TreeRoute;
+pub use self::import_route::ImportRoute;
ethcore/src/client.rs

@@ -87,6 +87,8 @@ pub struct ClientConfig {
     pub blockchain: BlockChainConfig,
     /// Prefer journal rather than archive.
     pub prefer_journal: bool,
+    /// The name of the client instance.
+    pub name: String,
 }

 impl Default for ClientConfig {

@@ -95,6 +97,7 @@ impl Default for ClientConfig {
             queue: Default::default(),
             blockchain: Default::default(),
             prefer_journal: false,
+            name: Default::default(),
         }
     }
 }

@@ -182,6 +185,13 @@ pub trait BlockChainClient : Sync + Send {

     /// Returns logs matching given filter.
     fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
+
+    /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
+    fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>>;
+
+    /// Submit `seal` as a valid solution for the header of `pow_hash`.
+    /// Will check the seal, but not actually insert the block into the chain.
+    fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error>;
 }

 #[derive(Default, Clone, Debug, Eq, PartialEq)]

@@ -193,6 +203,8 @@ pub struct ClientReport {
     pub transactions_applied: usize,
     /// How much gas has been processed so far.
     pub gas_processed: U256,
+    /// Memory used by state DB
+    pub state_db_mem: usize,
 }

 impl ClientReport {

@@ -225,7 +237,7 @@ pub struct Client<V = CanonVerifier> where V: Verifier {
 }

 const HISTORY: u64 = 1000;
-const CLIENT_DB_VER_STR: &'static str = "4.0";
+const CLIENT_DB_VER_STR: &'static str = "5.1";

 impl Client<CanonVerifier> {
     /// Create a new client with given spec and DB path.

@@ -391,7 +403,8 @@ impl<V> Client<V> where V: Verifier {
         .commit(header.number(), &header.hash(), ancient)
         .expect("State DB commit failed.");

-        // And update the chain
+        // And update the chain after commit to prevent race conditions
+        // (when something is in chain but you are not able to fetch details)
         self.chain.write().unwrap()
             .insert_block(&block.bytes, receipts);

@@ -404,8 +417,12 @@ impl<V> Client<V> where V: Verifier {

         {
             let mut block_queue = self.block_queue.write().unwrap();
-            block_queue.mark_as_bad(&bad_blocks);
-            block_queue.mark_as_good(&good_blocks);
+            if !bad_blocks.is_empty() {
+                block_queue.mark_as_bad(&bad_blocks);
+            }
+            if !good_blocks.is_empty() {
+                block_queue.mark_as_good(&good_blocks);
+            }
         }

         {

@@ -413,7 +430,9 @@ impl<V> Client<V> where V: Verifier {
             if !good_blocks.is_empty() && block_queue.queue_info().is_empty() {
                 io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
                     good: good_blocks,
-                    retracted: bad_blocks,
+                    bad: bad_blocks,
+                    // TODO [todr] were to take those from?
+                    retracted: vec![],
                 })).unwrap();
             }
         }

@@ -437,7 +456,9 @@ impl<V> Client<V> where V: Verifier {

     /// Get the report.
     pub fn report(&self) -> ClientReport {
-        self.report.read().unwrap().clone()
+        let mut report = self.report.read().unwrap().clone();
+        report.state_db_mem = self.state_db.lock().unwrap().mem_used();
+        report
     }

     /// Tick the client.

@@ -509,39 +530,6 @@ impl<V> Client<V> where V: Verifier {
         trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number());
         *self.sealing_block.lock().unwrap() = Some(b);
     }
-
-    /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
-    pub fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
-        if self.sealing_block.lock().unwrap().is_none() {
-            self.sealing_enabled.store(true, atomic::Ordering::Relaxed);
-            // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for.
-            self.prepare_sealing();
-        }
-        &self.sealing_block
-    }
-
-    /// Submit `seal` as a valid solution for the header of `pow_hash`.
-    /// Will check the seal, but not actually insert the block into the chain.
-    pub fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
-        let mut maybe_b = self.sealing_block.lock().unwrap();
-        match *maybe_b {
-            Some(ref b) if b.hash() == pow_hash => {}
-            _ => { return Err(Error::PowHashInvalid); }
-        }
-
-        let b = maybe_b.take();
-        match b.unwrap().try_seal(self.engine.deref().deref(), seal) {
-            Err(old) => {
-                *maybe_b = Some(old);
-                Err(Error::PowInvalid)
-            }
-            Ok(sealed) => {
-                // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
-                try!(self.import_block(sealed.rlp_bytes()));
-                Ok(())
-            }
-        }
-    }
 }

 // TODO: need MinerService MinerIoHandler

@@ -704,6 +692,39 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
         })
         .collect()
     }
+
+    /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
+    fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
+        if self.sealing_block.lock().unwrap().is_none() {
+            self.sealing_enabled.store(true, atomic::Ordering::Relaxed);
+            // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for.
+            self.prepare_sealing();
+        }
+        &self.sealing_block
+    }
+
+    /// Submit `seal` as a valid solution for the header of `pow_hash`.
+    /// Will check the seal, but not actually insert the block into the chain.
+    fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
+        let mut maybe_b = self.sealing_block.lock().unwrap();
+        match *maybe_b {
+            Some(ref b) if b.hash() == pow_hash => {}
+            _ => { return Err(Error::PowHashInvalid); }
+        }
+
+        let b = maybe_b.take();
+        match b.unwrap().try_seal(self.engine.deref().deref(), seal) {
+            Err(old) => {
+                *maybe_b = Some(old);
+                Err(Error::PowInvalid)
+            }
+            Ok(sealed) => {
+                // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
+                try!(self.import_block(sealed.rlp_bytes()));
+                Ok(())
+            }
+        }
+    }
 }

 impl MayPanic for Client {
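
The sealing methods move off the concrete Client and into the BlockChainClient trait, which is what lets the RPC layer drive mining generically. A sketch of the shape (the two method signatures come from the hunks above; every other type here is a stand-in):

use std::sync::Mutex;

// Stubs for the real types; only the trait methods mirror the diff.
type H256 = u64;
type Bytes = Vec<u8>;
struct ClosedBlock;
#[derive(Debug)]
enum Error { PowHashInvalid }

trait BlockChainClient: Sync + Send {
    fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>>;
    fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error>;
}

// RPC code can now drive sealing through any BlockChainClient, not just
// the concrete Client type.
fn has_work<C: BlockChainClient>(client: &C) -> bool {
    client.sealing_block().lock().unwrap().is_some()
}

struct StubClient { sealing: Mutex<Option<ClosedBlock>> }

impl BlockChainClient for StubClient {
    fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> { &self.sealing }
    fn submit_seal(&self, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {
        Err(Error::PowHashInvalid) // stand-in body
    }
}

fn main() {
    let client = StubClient { sealing: Mutex::new(Some(ClosedBlock)) };
    println!("has sealing block: {}", has_work(&client));
    println!("{:?}", client.submit_seal(0, vec![]));
}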
ethcore/src/ethereum/ethash.rs

@@ -202,7 +202,7 @@ impl Engine for Ethash {
     }
 }

-#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self
+#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self
 impl Ethash {
     fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 {
         const EXP_DIFF_PERIOD: u64 = 100000;
ethcore/src/evm/interpreter.rs

@@ -243,7 +243,7 @@ struct CodeReader<'a> {
     code: &'a Bytes
 }

-#[cfg_attr(feature="dev", allow(len_without_is_empty))]
+#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))]
 impl<'a> CodeReader<'a> {
     /// Get `no_of_bytes` from code and convert to U256. Move PC
     fn read(&mut self, no_of_bytes: usize) -> U256 {

@@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> {
     }
 }

-#[cfg_attr(feature="dev", allow(enum_variant_names))]
+#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
 enum InstructionCost {
     Gas(U256),
     GasMem(U256, U256),

@@ -347,7 +347,7 @@ impl evm::Evm for Interpreter {
 }

 impl Interpreter {
-    #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
     fn get_gas_cost_mem(&self,
         ext: &evm::Ext,
         instruction: Instruction,
ethcore/src/evm/tests.rs

@@ -25,9 +25,8 @@ struct FakeLogEntry {
 }

 #[derive(PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature="dev", allow(enum_variant_names))] // Common prefix is C ;)
 enum FakeCallType {
-    CALL, CREATE
+    Call, Create
 }

 #[derive(PartialEq, Eq, Hash, Debug)]

@@ -94,7 +93,7 @@ impl Ext for FakeExt {

     fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult {
         self.calls.insert(FakeCall {
-            call_type: FakeCallType::CREATE,
+            call_type: FakeCallType::Create,
             gas: *gas,
             sender_address: None,
             receive_address: None,

@@ -115,7 +114,7 @@ impl Ext for FakeExt {
         _output: &mut [u8]) -> MessageCallResult {

         self.calls.insert(FakeCall {
-            call_type: FakeCallType::CALL,
+            call_type: FakeCallType::Call,
             gas: *gas,
             sender_address: Some(sender_address.clone()),
             receive_address: Some(receive_address.clone()),

@@ -909,7 +908,7 @@ fn test_calls(factory: super::Factory) {
     };

     assert_set_contains(&ext.calls, &FakeCall {
-        call_type: FakeCallType::CALL,
+        call_type: FakeCallType::Call,
         gas: U256::from(2556),
         sender_address: Some(address.clone()),
         receive_address: Some(code_address.clone()),

@@ -918,7 +917,7 @@ fn test_calls(factory: super::Factory) {
         code_address: Some(code_address.clone())
     });
     assert_set_contains(&ext.calls, &FakeCall {
-        call_type: FakeCallType::CALL,
+        call_type: FakeCallType::Call,
         gas: U256::from(2556),
         sender_address: Some(address.clone()),
         receive_address: Some(address.clone()),
ethcore/src/externalities.rs

@@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> {
         self.state.code(address).unwrap_or_else(|| vec![])
     }

-    #[cfg_attr(feature="dev", allow(match_ref_pats))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
     fn ret(&mut self, gas: &U256, data: &[u8]) -> Result<U256, evm::Error> {
         match &mut self.output {
             &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe {

@@ -226,9 +226,9 @@ impl<'a> Ext for Externalities<'a> {

     fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
         let address = self.origin_info.address.clone();
-        self.substate.logs.push(LogEntry {
+        self.substate.logs.push(LogEntry {
             address: address,
-            topics: topics,
+            topics: topics,
             data: data.to_vec()
         });
     }
ethcore/src/lib.rs

@@ -15,16 +15,16 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 #![warn(missing_docs)]
-#![cfg_attr(feature="dev", feature(plugin))]
-#![cfg_attr(feature="dev", plugin(clippy))]
+#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
+#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]

 // Clippy config
 // TODO [todr] not really sure
-#![cfg_attr(feature="dev", allow(needless_range_loop))]
+#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
 // Shorter than if-else
-#![cfg_attr(feature="dev", allow(match_bool))]
+#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
 // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
-#![cfg_attr(feature="dev", allow(clone_on_copy))]
+#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]

 //! Ethcore library
 //!
ethcore/src/service.rs

@@ -30,6 +30,8 @@ pub enum SyncMessage {
         /// Hashes of blocks imported to blockchain
         good: Vec<H256>,
+        /// Hashes of blocks not imported to blockchain
+        bad: Vec<H256>,
         /// Hashes of blocks that were removed from canonical chain
         retracted: Vec<H256>,
     },
     /// A block is ready

@@ -115,12 +117,11 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
     }
 }

-    #[cfg_attr(feature="dev", allow(match_ref_pats))]
-    #[cfg_attr(feature="dev", allow(single_match))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
     fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
-        if let &UserMessage(ref message) = net_message {
-            match message {
-                &SyncMessage::BlockVerified => {
+        if let UserMessage(ref message) = *net_message {
+            match *message {
+                SyncMessage::BlockVerified => {
                     self.client.import_verified_blocks(&io.channel());
                 },
                 _ => {}, // ignore other messages
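
NewChainBlocks now distinguishes three sets: good (imported), bad (failed import), and retracted (removed from the canonical chain), with the client currently sending an empty retracted list, as the TODO above notes. A sketch of a handler consuming the richer message (the enum shape is from the hunk; the handler itself is hypothetical):

type H256 = u64; // stub for util::hash::H256

// Shape of the message after this commit (doc comments from the hunk).
enum SyncMessage {
    NewChainBlocks {
        /// Hashes of blocks imported to blockchain
        good: Vec<H256>,
        /// Hashes of blocks not imported to blockchain
        bad: Vec<H256>,
        /// Hashes of blocks that were removed from canonical chain
        retracted: Vec<H256>,
    },
    BlockVerified,
}

// Hypothetical consumer, written in the `match *message` style the
// ClientIoHandler hunk switches to.
fn handle(message: &SyncMessage) {
    match *message {
        SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => {
            println!("imported {}, failed {}, retracted {}",
                     good.len(), bad.len(), retracted.len());
        }
        SyncMessage::BlockVerified => { /* would trigger import_verified_blocks */ }
    }
}

fn main() {
    handle(&SyncMessage::NewChainBlocks { good: vec![1], bad: vec![], retracted: vec![] });
}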
ethcore/src/spec.rs

@@ -99,7 +99,7 @@ pub struct Spec {
     genesis_state: PodState,
 }

-#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
+#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
 impl Spec {
     /// Convert this object into a boxed Engine of the right underlying type.
     // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.

@@ -136,7 +136,7 @@ impl Spec {
             uncles_hash: RlpStream::new_list(0).out().sha3(),
             extra_data: self.extra_data.clone(),
             state_root: self.state_root().clone(),
-            receipts_root: self.receipts_root.clone(),
+            receipts_root: self.receipts_root.clone(),
             log_bloom: H2048::new().clone(),
             gas_used: self.gas_used.clone(),
             gas_limit: self.gas_limit.clone(),

@@ -182,7 +182,7 @@ impl Spec {
             )
         }
     };

     self.parent_hash = H256::from_json(&genesis["parentHash"]);
     self.transactions_root = genesis.find("transactionsTrie").and_then(|_| Some(H256::from_json(&genesis["transactionsTrie"]))).unwrap_or(SHA3_NULL_RLP.clone());
     self.receipts_root = genesis.find("receiptTrie").and_then(|_| Some(H256::from_json(&genesis["receiptTrie"]))).unwrap_or(SHA3_NULL_RLP.clone());

@@ -249,7 +249,7 @@ impl FromJson for Spec {
             )
         }
     };

     Spec {
         name: json.find("name").map_or("unknown", |j| j.as_string().unwrap()).to_owned(),
         engine_name: json["engineName"].as_string().unwrap().to_owned(),

@@ -278,7 +278,7 @@ impl Spec {
     /// Ensure that the given state DB has the trie nodes in for the genesis state.
     pub fn ensure_db_good(&self, db: &mut HashDB) -> bool {
         if !db.contains(&self.state_root()) {
-            let mut root = H256::new();
+            let mut root = H256::new();
             {
                 let mut t = SecTrieDBMut::new(db, &mut root);
                 for (address, account) in self.genesis_state.get().iter() {
ethcore/src/state.rs

@@ -224,7 +224,7 @@ impl State {

     /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
     /// `accounts` is mutable because we may need to commit the code or storage and record that.
-    #[cfg_attr(feature="dev", allow(match_ref_pats))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
     pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap<Address, Option<Account>>) {
         // first, commit the sub trees.
         // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?
ethcore/src/transaction.rs

@@ -80,7 +80,7 @@ impl Transaction {
 }

 impl FromJson for SignedTransaction {
-    #[cfg_attr(feature="dev", allow(single_char_pattern))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))]
     fn from_json(json: &Json) -> SignedTransaction {
         let t = Transaction {
             nonce: xjson!(&json["nonce"]),
ethcore/src/verification/mod.rs

@@ -17,9 +17,11 @@
 pub mod verification;
 pub mod verifier;
 mod canon_verifier;
+#[cfg(test)]
 mod noop_verifier;

 pub use self::verification::*;
 pub use self::verifier::Verifier;
 pub use self::canon_verifier::CanonVerifier;
+#[cfg(test)]
 pub use self::noop_verifier::NoopVerifier;
hook.sh (2 lines changed)
@@ -1,3 +1,3 @@
 #!/bin/sh
-echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev" > ./.git/hooks/pre-push
+echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push
 chmod +x ./.git/hooks/pre-push
parity/main.rs (208 lines changed)
@@ -17,8 +17,8 @@
 //! Ethcore client application.

 #![warn(missing_docs)]
-#![cfg_attr(feature="dev", feature(plugin))]
-#![cfg_attr(feature="dev", plugin(clippy))]
+#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
+#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
 extern crate docopt;
 extern crate rustc_serialize;
 extern crate ethcore_util as util;

@@ -32,6 +32,7 @@ extern crate fdlimit;
 extern crate daemonize;
 extern crate time;
 extern crate number_prefix;
+extern crate rpassword;

 #[cfg(feature = "rpc")]
 extern crate ethcore_rpc as rpc;

@@ -43,16 +44,26 @@ use std::path::PathBuf;
 use env_logger::LogBuilder;
 use ctrlc::CtrlC;
 use util::*;
-use util::panics::MayPanic;
+use util::panics::{MayPanic, ForwardPanic, PanicHandler};
 use ethcore::spec::*;
 use ethcore::client::*;
 use ethcore::service::{ClientService, NetSyncMessage};
 use ethcore::ethereum;
-use ethsync::{EthSync, SyncConfig};
+use ethsync::{EthSync, SyncConfig, SyncStatusProvider};
 use docopt::Docopt;
 use daemonize::Daemonize;
 use number_prefix::{binary_prefix, Standalone, Prefixed};

+fn die_with_message(msg: &str) -> ! {
+    println!("ERROR: {}", msg);
+    exit(1);
+}
+
+#[macro_export]
+macro_rules! die {
+    ($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*))));
+}
+
 const USAGE: &'static str = r#"
 Parity. Ethereum Client.
 By Wood/Paronyan/Kotewicz/Drwięga/Volf.

@@ -60,15 +71,20 @@ Parity. Ethereum Client.

 Usage:
   parity daemon <pid-file> [options] [ --no-bootstrap | <enode>... ]
+  parity account (new | list)
   parity [options] [ --no-bootstrap | <enode>... ]

-Options:
+Protocol Options:
   --chain CHAIN            Specify the blockchain type. CHAIN may be either a JSON chain specification file
-                           or frontier, mainnet, morden, or testnet [default: frontier].
+                           or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead].
+  --testnet                Equivalent to --chain testnet (geth-compatible).
+  --networkid INDEX        Override the network identifier from the chain we are on.
   --archive                Client should not prune the state/storage trie.
-  -d --db-path PATH        Specify the database & configuration directory path [default: $HOME/.parity]
+  -d --datadir PATH        Specify the database & configuration directory path [default: $HOME/.parity]
   --keys-path PATH         Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
+  --identity NAME          Specify your node's name.

+Networking Options:
   --no-bootstrap           Don't bother trying to connect to any nodes initially.
   --listen-address URL     Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304].
   --public-address URL     Specify the IP/port on which peers may connect.

@@ -78,18 +94,32 @@ Options:
   --no-upnp                Disable trying to figure out the correct public adderss over UPnP.
   --node-key KEY           Specify node secret key, either as 64-character hex string or input to SHA3 operation.

+API and Console Options:
+  -j --jsonrpc             Enable the JSON-RPC API sever.
+  --jsonrpc-addr HOST      Specify the hostname portion of the JSONRPC API server [default: 127.0.0.1].
+  --jsonrpc-port PORT      Specify the port portion of the JSONRPC API server [default: 8545].
+  --jsonrpc-cors URL       Specify CORS header for JSON-RPC API responses [default: null].
+  --jsonrpc-apis APIS      Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited
+                           list of API name. Possible name are web3, eth and net. [default: web3,eth,net].
+  --rpc                    Equivalent to --jsonrpc (geth-compatible).
+  --rpcaddr HOST           Equivalent to --jsonrpc-addr HOST (geth-compatible).
+  --rpcport PORT           Equivalent to --jsonrpc-port PORT (geth-compatible).
+  --rpcapi APIS            Equivalent to --jsonrpc-apis APIS (geth-compatible).
+  --rpccorsdomain URL      Equivalent to --jsonrpc-cors URL (geth-compatible).
+
+Sealing/Mining Options:
+  --author ADDRESS         Specify the block author (aka "coinbase") address for sending block rewards
+                           from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
+  --extradata STRING       Specify a custom extra-data for authored blocks, no more than 32 characters.
+
+Memory Footprint Options:
   --cache-pref-size BYTES  Specify the prefered size of the blockchain cache in bytes [default: 16384].
   --cache-max-size BYTES   Specify the maximum size of the blockchain cache in bytes [default: 262144].
   --queue-max-size BYTES   Specify the maximum size of memory to use for block queue [default: 52428800].
+  --cache MEGABYTES        Set total amount of cache to use for the entire system, mutually exclusive with
+                           other cache options (geth-compatible).

-  -j --jsonrpc             Enable the JSON-RPC API sever.
-  --jsonrpc-url URL        Specify URL for JSON-RPC API server [default: 127.0.0.1:8545].
-  --jsonrpc-cors URL       Specify CORS header for JSON-RPC API responses [default: null].
-
-  --author ADDRESS         Specify the block author (aka "coinbase") address for sending block rewards
-                           from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
-  --extra-data STRING      Specify a custom extra-data for authored blocks, no more than 32 characters.

 Miscellaneous Options:
   -l --logging LOGGING     Specify the logging level.
   -v --version             Show information about version.
   -h --help                Show this screen.

@@ -98,17 +128,24 @@ Options:
 #[derive(Debug, RustcDecodable)]
 struct Args {
     cmd_daemon: bool,
+    cmd_account: bool,
+    cmd_new: bool,
+    cmd_list: bool,
     arg_pid_file: String,
     arg_enode: Vec<String>,
     flag_chain: String,
-    flag_db_path: String,
+    flag_testnet: bool,
+    flag_datadir: String,
+    flag_networkid: Option<String>,
+    flag_identity: String,
+    flag_cache: Option<usize>,
     flag_keys_path: String,
     flag_archive: bool,
     flag_no_bootstrap: bool,
     flag_listen_address: String,
     flag_public_address: Option<String>,
     flag_address: Option<String>,
-    flag_peers: u32,
+    flag_peers: usize,
     flag_no_discovery: bool,
     flag_no_upnp: bool,
     flag_node_key: Option<String>,

@@ -116,8 +153,15 @@ struct Args {
     flag_cache_max_size: usize,
     flag_queue_max_size: usize,
     flag_jsonrpc: bool,
-    flag_jsonrpc_url: String,
+    flag_jsonrpc_addr: String,
+    flag_jsonrpc_port: u16,
     flag_jsonrpc_cors: String,
+    flag_jsonrpc_apis: String,
+    flag_rpc: bool,
+    flag_rpcaddr: Option<String>,
+    flag_rpcport: Option<u16>,
+    flag_rpccorsdomain: Option<String>,
+    flag_rpcapi: Option<String>,
     flag_logging: Option<String>,
     flag_version: bool,
     flag_author: String,

@@ -151,20 +195,29 @@ fn setup_log(init: &Option<String>) {
 }

 #[cfg(feature = "rpc")]
-fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_domain: &str) {
+fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option<Arc<PanicHandler>> {
     use rpc::v1::*;

-    let mut server = rpc::HttpServer::new(1);
-    server.add_delegate(Web3Client::new().to_delegate());
-    server.add_delegate(EthClient::new(&client, &sync).to_delegate());
-    server.add_delegate(EthFilterClient::new(&client).to_delegate());
-    server.add_delegate(NetClient::new(&sync).to_delegate());
-    server.add_delegate(PersonalClient::new(&client).to_delegate());
-    server.start_async(url, cors_domain);
+    let server = rpc::RpcServer::new();
+    for api in apis.into_iter() {
+        match api {
+            "web3" => server.add_delegate(Web3Client::new().to_delegate()),
+            "net" => server.add_delegate(NetClient::new(&sync).to_delegate()),
+            "eth" => {
+                server.add_delegate(EthClient::new(&client, &sync).to_delegate());
+                server.add_delegate(EthFilterClient::new(&client).to_delegate());
+            }
+            _ => {
+                die!("{}: Invalid API name to be enabled.", api);
+            }
+        }
+    }
+    Some(server.start_http(url, cors_domain, 1))
 }

 #[cfg(not(feature = "rpc"))]
-fn setup_rpc_server(_client: Arc<Client>, _sync: Arc<EthSync>, _url: &str) {
+fn setup_rpc_server(_client: Arc<Client>, _sync: Arc<EthSync>, _url: &str) -> Option<Arc<PanicHandler>> {
+    None
 }

 fn print_version() {
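
setup_rpc_server now receives the API list as a Vec<&str>, split from the comma-separated --jsonrpc-apis (or geth-style --rpcapi) value at the call site, and dies on unknown names. A trimmed, self-contained sketch of that dispatch (RpcServer and the delegates are stubbed with strings; only the names web3, net, and eth come from the diff):

// Stub standing in for rpc::RpcServer; add_delegate just records the name.
struct RpcServer;
impl RpcServer {
    fn new() -> RpcServer { RpcServer }
    fn add_delegate(&self, name: &str) { println!("registered {}", name); }
}

fn die(msg: String) -> ! {
    println!("ERROR: {}", msg);
    std::process::exit(1);
}

fn setup(apis: Vec<&str>) -> RpcServer {
    let server = RpcServer::new();
    for api in apis.into_iter() {
        match api {
            "web3" => server.add_delegate("Web3Client"),
            "net" => server.add_delegate("NetClient"),
            // "eth" pulls in two delegates, as in the diff:
            "eth" => {
                server.add_delegate("EthClient");
                server.add_delegate("EthFilterClient");
            }
            _ => die(format!("{}: Invalid API name to be enabled.", api)),
        }
    }
    server
}

fn main() {
    // Mirrors `apis.split(",").collect()` at the call site.
    let apis = "web3,eth,net";
    setup(apis.split(',').collect());
}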
@@ -180,16 +233,6 @@ By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
 ", version());
 }

-fn die_with_message(msg: &str) -> ! {
-    println!("ERROR: {}", msg);
-    exit(1);
-}
-
-#[macro_export]
-macro_rules! die {
-    ($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*))));
-}
-
 struct Configuration {
     args: Args
 }
@@ -202,7 +245,7 @@ impl Configuration {
     }

     fn path(&self) -> String {
-        self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
+        self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
     }

     fn author(&self) -> Address {

@@ -222,8 +265,11 @@ impl Configuration {
     }

     fn spec(&self) -> Spec {
+        if self.args.flag_testnet {
+            return ethereum::new_morden();
+        }
         match self.args.flag_chain.as_ref() {
-            "frontier" | "mainnet" => ethereum::new_frontier(),
+            "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(),
             "morden" | "testnet" => ethereum::new_morden(),
+            "olympic" => ethereum::new_olympic(),
             f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| die!("{}: Couldn't read chain specification file. Sure it exists?", f)).as_ref()),

@@ -247,7 +293,7 @@ impl Configuration {
         }
     }

-    #[cfg_attr(feature="dev", allow(useless_format))]
+    #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))]
     fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
         let mut listen_address = None;
         let mut public_address = None;

@@ -277,7 +323,7 @@ impl Configuration {
         ret.public_address = public;
         ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3()));
         ret.discovery_enabled = !self.args.flag_no_discovery;
-        ret.ideal_peers = self.args.flag_peers;
+        ret.ideal_peers = self.args.flag_peers as u32;
         let mut net_path = PathBuf::from(&self.path());
         net_path.push("network");
         ret.config_path = Some(net_path.to_str().unwrap().to_owned());

@@ -296,10 +342,44 @@ impl Configuration {
             .start()
             .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e));
         }
+        if self.args.cmd_account {
+            self.execute_account_cli();
+            return;
+        }
         self.execute_client();
     }

+    fn execute_account_cli(&self) {
+        use util::keys::store::SecretStore;
+        use rpassword::read_password;
+        let mut secret_store = SecretStore::new();
+        if self.args.cmd_new {
+            println!("Please note that password is NOT RECOVERABLE.");
+            println!("Type password: ");
+            let password = read_password().unwrap();
+            println!("Repeat password: ");
+            let password_repeat = read_password().unwrap();
+            if password != password_repeat {
+                println!("Passwords do not match!");
+                return;
+            }
+            println!("New account address:");
+            let new_address = secret_store.new_account(&password).unwrap();
+            println!("{:?}", new_address);
+            return;
+        }
+        if self.args.cmd_list {
+            println!("Known addresses:");
+            for &(addr, _) in secret_store.accounts().unwrap().iter() {
+                println!("{:?}", addr);
+            }
+        }
+    }
+
+    fn execute_client(&self) {
+        // Setup panic handler
+        let panic_handler = PanicHandler::new_in_arc();
+
+        // Setup logging
         setup_log(&self.args.flag_logging);
         // Raise fdlimit
@@ -308,15 +388,25 @@ impl Configuration {
         let spec = self.spec();
         let net_settings = self.net_settings(&spec);
         let mut sync_config = SyncConfig::default();
-        sync_config.network_id = spec.network_id();
+        sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id))).unwrap_or(spec.network_id());

         // Build client
         let mut client_config = ClientConfig::default();
-        client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size;
-        client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
+        match self.args.flag_cache {
+            Some(mb) => {
+                client_config.blockchain.max_cache_size = mb * 1024 * 1024;
+                client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2;
+            }
+            None => {
+                client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size;
+                client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
+            }
+        }
         client_config.prefer_journal = !self.args.flag_archive;
+        client_config.name = self.args.flag_identity.clone();
         client_config.queue.max_mem_use = self.args.flag_queue_max_size;
         let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
+        panic_handler.forward_from(&service);
         let client = service.client().clone();
         client.set_author(self.author());
         client.set_extra_data(self.extra_data());
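
The geth-compatible --cache MEGABYTES flag overrides both fine-grained cache options: the maximum blockchain cache becomes mb * 1024 * 1024 bytes and the preferred size half of that. The arithmetic as a standalone sketch (field names as in BlockChainConfig; the defaults are the documented 16384 and 262144 bytes):

struct BlockChainConfig {
    pref_cache_size: usize, // bytes
    max_cache_size: usize,  // bytes
}

// Mirrors the match on `self.args.flag_cache` in execute_client above.
fn cache_config(cache_mb: Option<usize>, pref: usize, max: usize) -> BlockChainConfig {
    match cache_mb {
        Some(mb) => {
            let max_cache_size = mb * 1024 * 1024;
            BlockChainConfig {
                max_cache_size: max_cache_size,
                pref_cache_size: max_cache_size / 2,
            }
        }
        None => BlockChainConfig { pref_cache_size: pref, max_cache_size: max },
    }
}

fn main() {
    // --cache 128  =>  max 134217728 bytes, preferred 67108864 bytes
    let c = cache_config(Some(128), 16384, 262144);
    assert_eq!(c.max_cache_size, 128 * 1024 * 1024);
    assert_eq!(c.pref_cache_size, 64 * 1024 * 1024);
}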
@ -325,32 +415,45 @@ impl Configuration {
|
||||
let sync = EthSync::register(service.network(), sync_config, client);
|
||||
|
||||
// Setup rpc
|
||||
if self.args.flag_jsonrpc {
|
||||
setup_rpc_server(service.client(), sync.clone(), &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors);
|
||||
SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url));
|
||||
if self.args.flag_jsonrpc || self.args.flag_rpc {
|
||||
let url = format!("{}:{}",
|
||||
self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr),
|
||||
self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port)
|
||||
);
|
||||
SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url));
|
||||
let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
|
||||
// TODO: use this as the API list.
|
||||
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
|
||||
let server_handler = setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect());
|
||||
if let Some(handler) = server_handler {
|
||||
panic_handler.forward_from(handler.deref());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Register IO handler
|
||||
let io_handler = Arc::new(ClientIoHandler {
|
||||
client: service.client(),
|
||||
info: Default::default(),
|
||||
sync: sync
|
||||
sync: sync.clone(),
|
||||
});
|
||||
service.io().register_handler(io_handler).expect("Error registering IO handler");
|
||||
|
||||
// Handle exit
|
||||
wait_for_exit(&service);
|
||||
wait_for_exit(panic_handler);
|
||||
}
|
||||
}
|
||||
|
||||
fn wait_for_exit(client_service: &ClientService) {
|
||||
fn wait_for_exit(panic_handler: Arc<PanicHandler>) {
|
||||
let exit = Arc::new(Condvar::new());
|
||||
|
||||
// Handle possible exits
|
||||
let e = exit.clone();
|
||||
CtrlC::set_handler(move || { e.notify_all(); });
|
||||
|
||||
// Handle panics
|
||||
let e = exit.clone();
|
||||
client_service.on_panic(move |_reason| { e.notify_all(); });
|
||||
panic_handler.on_panic(move |_reason| { e.notify_all(); });
|
||||
|
||||
// Wait for signal
|
||||
let mutex = Mutex::new(());
|
||||
@ -396,7 +499,7 @@ impl Informant {
|
||||
let sync_info = sync.status();
|
||||
|
||||
if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) {
|
||||
println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} chain, {} queue, {} sync ]",
|
||||
println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]",
|
||||
chain_info.best_block_number,
|
||||
chain_info.best_block_hash,
|
||||
(report.blocks_imported - last_report.blocks_imported) / dur,
|
||||
@ -409,6 +512,7 @@ impl Informant {
|
||||
queue_info.unverified_queue_size,
|
||||
queue_info.verified_queue_size,
|
||||
|
||||
Informant::format_bytes(report.state_db_mem),
|
||||
Informant::format_bytes(cache_info.total()),
|
||||
Informant::format_bytes(queue_info.mem_used),
|
||||
Informant::format_bytes(sync_info.mem_used),
|
||||
|
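The exit path is the crux of this main.rs hunk: instead of hanging shutdown off the ClientService, the process now waits on a Condvar that both the CtrlC handler and the aggregated PanicHandler notify. A minimal std-only sketch of that pattern (the timer thread below is only a stand-in for those two callbacks, not parity's API):

use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

fn main() {
    let exit = Arc::new(Condvar::new());

    // In main.rs this closure is registered twice: with CtrlC::set_handler
    // and with PanicHandler::on_panic. A timer thread fakes the trigger here.
    let e = exit.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(100));
        e.notify_all();
    });

    // Block the main thread until any registered handler fires.
    let mutex = Mutex::new(());
    let guard = mutex.lock().unwrap();
    let _ = exit.wait(guard).unwrap();
    println!("shutting down");
}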
@@ -12,8 +12,8 @@ build = "build.rs"
log = "0.3"
serde = "0.7.0"
serde_json = "0.7.0"
jsonrpc-core = "1.2"
jsonrpc-http-server = "2.1"
jsonrpc-core = "2.0"
jsonrpc-http-server = "3.0"
ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" }
ethash = { path = "../ethash" }
@@ -26,8 +26,9 @@ serde_macros = { version = "0.7.0", optional = true }
[build-dependencies]
serde_codegen = { version = "0.7.0", optional = true }
syntex = "0.29.0"
rustc_version = "0.1"

[features]
default = ["serde_codegen"]
nightly = ["serde_macros"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
rpc/build.rs
@@ -1,3 +1,23 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

extern crate rustc_version;

use rustc_version::{version_meta, Channel};

#[cfg(not(feature = "serde_macros"))]
mod inner {
	extern crate syntex;
@@ -26,4 +46,7 @@ mod inner {

fn main() {
	inner::main();
	if let Channel::Nightly = version_meta().channel {
		println!("cargo:rustc-cfg=nightly");
	}
}
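Both build scripts added in this merge emit a custom cfg flag when compiled by a nightly toolchain, which is what the `all(nightly, feature="dev")` attributes later in this diff key off. A tiny sketch of consuming such a flag (the function names here are illustrative only):

// In a crate whose build.rs printed `cargo:rustc-cfg=nightly`:

#[cfg(nightly)]
fn toolchain() -> &'static str { "nightly" }

#[cfg(not(nightly))]
fn toolchain() -> &'static str { "stable or beta" }

fn main() {
    println!("built with a {} compiler", toolchain());
}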
@@ -29,33 +29,43 @@ extern crate ethcore;
extern crate ethsync;
extern crate transient_hashmap;

use std::sync::Arc;
use std::thread;
use util::panics::PanicHandler;
use self::jsonrpc_core::{IoHandler, IoDelegate};

pub mod v1;

/// Http server.
pub struct HttpServer {
	handler: IoHandler,
	threads: usize
pub struct RpcServer {
	handler: Arc<IoHandler>,
}

impl HttpServer {
impl RpcServer {
	/// Construct new http server object with given number of threads.
	pub fn new(threads: usize) -> HttpServer {
		HttpServer {
			handler: IoHandler::new(),
			threads: threads
	pub fn new() -> RpcServer {
		RpcServer {
			handler: Arc::new(IoHandler::new()),
		}
	}

	/// Add io delegate.
	pub fn add_delegate<D>(&mut self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
	pub fn add_delegate<D>(&self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
		self.handler.add_delegate(delegate);
	}

	/// Start server asynchronously in new thread
	pub fn start_async(self, addr: &str, cors_domain: &str) {
		let server = jsonrpc_http_server::Server::new(self.handler, self.threads);
		server.start_async(addr, jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain.to_owned()))
	/// Start the server asynchronously in a new thread and return the panic handler.
	pub fn start_http(&self, addr: &str, cors_domain: &str, threads: usize) -> Arc<PanicHandler> {
		let addr = addr.to_owned();
		let cors_domain = cors_domain.to_owned();
		let panic_handler = PanicHandler::new_in_arc();
		let ph = panic_handler.clone();
		let server = jsonrpc_http_server::Server::new(self.handler.clone());
		thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || {
			ph.catch_panic(move || {
				server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain), threads);
			}).unwrap()
		}).expect("Error while creating jsonrpc http thread");
		panic_handler
	}
}

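The start_async → start_http change is the shape worth remembering: the server thread no longer swallows panics, it runs inside a panic-catching closure and hands the handler back to the caller, who forwards it into the global exit machinery. A self-contained sketch of that pattern, with toy stand-ins for PanicHandler and the jsonrpc server (only the thread name comes from parity):

use std::panic;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

struct PanicHandler {
    panicked: Mutex<bool>,
}

impl PanicHandler {
    fn new_in_arc() -> Arc<PanicHandler> {
        Arc::new(PanicHandler { panicked: Mutex::new(false) })
    }

    // Run f, recording (instead of propagating) any panic it raises.
    fn catch_panic<F: FnOnce() + panic::UnwindSafe>(&self, f: F) {
        if panic::catch_unwind(f).is_err() {
            *self.panicked.lock().unwrap() = true;
        }
    }
}

fn start_blocking_server(addr: &str) {
    // The real code blocks in jsonrpc_http_server::Server::start here.
    panic!("listener died on {}", addr);
}

fn start_http(addr: &str) -> Arc<PanicHandler> {
    let addr = addr.to_owned();
    let panic_handler = PanicHandler::new_in_arc();
    let ph = panic_handler.clone();
    thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || {
        ph.catch_panic(move || start_blocking_server(&addr));
    }).expect("Error while creating jsonrpc http thread");
    panic_handler
}

fn main() {
    let handler = start_http("127.0.0.1:8545");
    thread::sleep(Duration::from_millis(50));
    // The panic stayed on the server thread; the caller observes it here.
    println!("server panicked: {}", *handler.panicked.lock().unwrap());
}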
@@ -84,7 +84,7 @@ impl<F, T> PollManager<F, T> where T: Timer {
	}

	/// Returns the number of the block when the last poll happened.
	pub fn get_poll_info(&mut self, id: &PollId) -> Option<&PollInfo<F>> {
	pub fn poll_info(&mut self, id: &PollId) -> Option<&PollInfo<F>> {
		self.polls.prune();
		self.polls.get(id)
	}
@@ -124,21 +124,21 @@ mod tests {

		*time.borrow_mut() = 10;
		indexer.update_poll(&0, 21);
		assert_eq!(indexer.get_poll_info(&0).unwrap().filter, false);
		assert_eq!(indexer.get_poll_info(&0).unwrap().block_number, 21);
		assert_eq!(indexer.poll_info(&0).unwrap().filter, false);
		assert_eq!(indexer.poll_info(&0).unwrap().block_number, 21);

		*time.borrow_mut() = 30;
		indexer.update_poll(&1, 23);
		assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true);
		assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23);
		assert_eq!(indexer.poll_info(&1).unwrap().filter, true);
		assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23);

		*time.borrow_mut() = 75;
		indexer.update_poll(&0, 30);
		assert!(indexer.get_poll_info(&0).is_none());
		assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true);
		assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23);
		assert!(indexer.poll_info(&0).is_none());
		assert_eq!(indexer.poll_info(&1).unwrap().filter, true);
		assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23);

		indexer.remove_poll(&1);
		assert!(indexer.get_poll_info(&1).is_none());
		assert!(indexer.poll_info(&1).is_none());
	}
}
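The rename (get_poll_info → poll_info) follows the Rust getter convention, and the body shows the invariant the tests above rely on: every read prunes expired polls first. A toy, std-only version of that prune-on-read shape, reusing the test's timestamps (all names here are illustrative):

use std::collections::HashMap;

struct Polls {
    entries: HashMap<usize, u64>, // poll id -> last-touched timestamp
    now: u64,
    lifetime: u64,
}

impl Polls {
    // Drop every poll that has outlived `lifetime`.
    fn prune(&mut self) {
        let (now, lifetime) = (self.now, self.lifetime);
        self.entries.retain(|_, &mut touched| now.saturating_sub(touched) < lifetime);
    }

    // Reads prune first, so a stale poll can never escape the accessor.
    fn poll_info(&mut self, id: &usize) -> Option<&u64> {
        self.prune();
        self.entries.get(id)
    }
}

fn main() {
    let mut polls = Polls { entries: HashMap::new(), now: 75, lifetime: 60 };
    polls.entries.insert(0, 10); // touched long ago -> expired
    polls.entries.insert(1, 30); // still fresh
    assert!(polls.poll_info(&0).is_none());
    assert!(polls.poll_info(&1).is_some());
    println!("ok");
}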
@@ -17,7 +17,7 @@
//! Eth rpc implementation.
use std::collections::HashMap;
use std::sync::{Arc, Weak, Mutex, RwLock};
use ethsync::{EthSync, SyncState};
use ethsync::{SyncStatusProvider, SyncState};
use jsonrpc_core::*;
use util::numbers::*;
use util::sha3::*;
@@ -25,7 +25,6 @@ use util::rlp::encode;
use ethcore::client::*;
use ethcore::block::{IsBlock};
use ethcore::views::*;
//#[macro_use] extern crate log;
use ethcore::ethereum::Ethash;
use ethcore::ethereum::denominations::shannon;
use v1::traits::{Eth, EthFilter};
@@ -33,15 +32,15 @@ use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncIn
use v1::helpers::{PollFilter, PollManager};

/// Eth rpc implementation.
pub struct EthClient {
	client: Weak<Client>,
	sync: Weak<EthSync>,
pub struct EthClient<C, S> where C: BlockChainClient, S: SyncStatusProvider {
	client: Weak<C>,
	sync: Weak<S>,
	hashrates: RwLock<HashMap<H256, u64>>,
}

impl EthClient {
impl<C, S> EthClient<C, S> where C: BlockChainClient, S: SyncStatusProvider {
	/// Creates new EthClient.
	pub fn new(client: &Arc<Client>, sync: &Arc<EthSync>) -> Self {
	pub fn new(client: &Arc<C>, sync: &Arc<S>) -> Self {
		EthClient {
			client: Arc::downgrade(client),
			sync: Arc::downgrade(sync),
@@ -95,7 +94,7 @@ impl EthClient {
	}
}

impl Eth for EthClient {
impl<C, S> Eth for EthClient<C, S> where C: BlockChainClient + 'static, S: SyncStatusProvider + 'static {
	fn protocol_version(&self, params: Params) -> Result<Value, Error> {
		match params {
			Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)),
@@ -275,14 +274,14 @@ impl Eth for EthClient {
}

/// Eth filter rpc implementation.
pub struct EthFilterClient {
	client: Weak<Client>,
pub struct EthFilterClient<C> where C: BlockChainClient {
	client: Weak<C>,
	polls: Mutex<PollManager<PollFilter>>,
}

impl EthFilterClient {
impl<C> EthFilterClient<C> where C: BlockChainClient {
	/// Creates new Eth filter client.
	pub fn new(client: &Arc<Client>) -> Self {
	pub fn new(client: &Arc<C>) -> Self {
		EthFilterClient {
			client: Arc::downgrade(client),
			polls: Mutex::new(PollManager::new())
@@ -290,7 +289,7 @@ impl EthFilterClient {
	}
}

impl EthFilter for EthFilterClient {
impl<C> EthFilter for EthFilterClient<C> where C: BlockChainClient + 'static {
	fn new_filter(&self, params: Params) -> Result<Value, Error> {
		from_params::<(Filter,)>(params)
			.and_then(|(filter,)| {
@@ -326,12 +325,13 @@ impl EthFilter for EthFilterClient {
		let client = take_weak!(self.client);
		from_params::<(Index,)>(params)
			.and_then(|(index,)| {
				let info = self.polls.lock().unwrap().get_poll_info(&index.value()).cloned();
				let info = self.polls.lock().unwrap().poll_info(&index.value()).cloned();
				match info {
					None => Ok(Value::Array(vec![] as Vec<Value>)),
					Some(info) => match info.filter {
						PollFilter::Block => {
							let current_number = client.chain_info().best_block_number;
							// + 1, because we want to return hashes including the current block hash.
							let current_number = client.chain_info().best_block_number + 1;
							let hashes = (info.block_number..current_number).into_iter()
								.map(BlockId::Number)
								.filter_map(|id| client.block_hash(id))
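The point of making EthClient generic is that the RPC layer no longer names concrete Client/EthSync types, so tests can hand it doubles (the TestBlockChainClient appearing later in this diff is exactly that). A self-contained sketch of the shape; all trait and type names below are illustrative, not parity's:

use std::sync::{Arc, Weak};

trait BlockChainClient: Send + Sync {
    fn best_block(&self) -> u64;
}

trait SyncStatusProvider: Send + Sync {
    fn peers(&self) -> usize;
}

struct EthClient<C: BlockChainClient, S: SyncStatusProvider> {
    client: Weak<C>, // Weak: the RPC layer must not keep the service alive
    sync: Weak<S>,
}

impl<C: BlockChainClient, S: SyncStatusProvider> EthClient<C, S> {
    fn new(client: &Arc<C>, sync: &Arc<S>) -> Self {
        EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync) }
    }

    fn summary(&self) -> Option<(u64, usize)> {
        // Upgrade both weak refs; answer None if the service already shut down.
        let c = self.client.upgrade()?;
        let s = self.sync.upgrade()?;
        Some((c.best_block(), s.peers()))
    }
}

struct TestClient;
impl BlockChainClient for TestClient { fn best_block(&self) -> u64 { 42 } }

struct TestSync;
impl SyncStatusProvider for TestSync { fn peers(&self) -> usize { 5 } }

fn main() {
    let (client, sync) = (Arc::new(TestClient), Arc::new(TestSync));
    let eth = EthClient::new(&client, &sync);
    assert_eq!(eth.summary(), Some((42, 5)));
    drop(client);
    assert_eq!(eth.summary(), None); // client gone: weak ref fails to upgrade
    println!("ok");
}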
@@ -17,24 +17,24 @@
//! Net rpc implementation.
use std::sync::{Arc, Weak};
use jsonrpc_core::*;
use ethsync::EthSync;
use ethsync::SyncStatusProvider;
use v1::traits::Net;

/// Net rpc implementation.
pub struct NetClient {
	sync: Weak<EthSync>
pub struct NetClient<S> where S: SyncStatusProvider {
	sync: Weak<S>
}

impl NetClient {
impl<S> NetClient<S> where S: SyncStatusProvider {
	/// Creates new NetClient.
	pub fn new(sync: &Arc<EthSync>) -> Self {
	pub fn new(sync: &Arc<S>) -> Self {
		NetClient {
			sync: Arc::downgrade(sync)
		}
	}
}

impl Net for NetClient {
impl<S> Net for NetClient<S> where S: SyncStatusProvider + 'static {
	fn version(&self, _: Params) -> Result<Value, Error> {
		Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64))
	}
@@ -4,9 +4,13 @@ name = "ethsync"
version = "0.9.99"
license = "GPL-3.0"
authors = ["Ethcore <admin@ethcore.io>"]
build = "build.rs"

[lib]

[build-dependencies]
rustc_version = "0.1"

[dependencies]
ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" }
@@ -21,4 +25,4 @@ rayon = "0.3.1"

[features]
default = []
dev = ["clippy", "ethcore/dev", "ethcore-util/dev"]
dev = ["ethcore/dev", "ethcore-util/dev"]
sync/build.rs (new file)
@@ -0,0 +1,25 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

extern crate rustc_version;

use rustc_version::{version_meta, Channel};

fn main() {
	if let Channel::Nightly = version_meta().channel {
		println!("cargo:rustc-cfg=nightly");
	}
}
@@ -43,6 +43,7 @@ use io::SyncIo;
use transaction_queue::TransactionQueue;
use time;
use super::SyncConfig;
use ethcore;

known_heap_size!(0, PeerInfo, Header, HeaderId);

@@ -207,7 +208,7 @@ pub struct ChainSync {
	/// True if common block for our and remote chain has been found
	have_common_block: bool,
	/// Last propagated block number
	last_send_block_number: BlockNumber,
	last_sent_block_number: BlockNumber,
	/// Max blocks to download ahead
	max_download_ahead_blocks: usize,
	/// Network ID
@@ -236,7 +237,7 @@ impl ChainSync {
			last_imported_hash: None,
			syncing_difficulty: U256::from(0u64),
			have_common_block: false,
			last_send_block_number: 0,
			last_sent_block_number: 0,
			max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks),
			network_id: config.network_id,
			transaction_queue: Mutex::new(TransactionQueue::new()),
@@ -274,7 +275,7 @@ impl ChainSync {
	}


	#[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()`
	#[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()`
	/// Reset sync. Clear all downloaded data but keep the queue
	fn reset(&mut self) {
		self.downloading_headers.clear();
@@ -342,7 +343,7 @@ impl ChainSync {
		Ok(())
	}

	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
	/// Called by peer once it has new block headers during sync
	fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
		self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders);
@@ -469,6 +470,7 @@ impl ChainSync {
	}

	/// Called by peer once it has new block bodies
	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
	fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
		let block_rlp = try!(r.at(0));
		let header_rlp = try!(block_rlp.at(0));
@@ -850,8 +852,8 @@ impl ChainSync {
			self.downloading_bodies.remove(&n);
			self.downloading_headers.remove(&n);
		}
		self.headers.remove_tail(&start);
		self.bodies.remove_tail(&start);
		self.headers.remove_from(&start);
		self.bodies.remove_from(&start);
	}

	/// Request headers from a peer by block hash
@@ -907,9 +909,8 @@ impl ChainSync {
		}
		match sync.send(peer_id, packet_id, packet) {
			Err(e) => {
				warn!(target:"sync", "Error sending request: {:?}", e);
				debug!(target:"sync", "Error sending request: {:?}", e);
				sync.disable_peer(peer_id);
				self.on_peer_aborting(sync, peer_id);
			}
			Ok(_) => {
				let mut peer = self.peers.get_mut(&peer_id).unwrap();
@@ -922,9 +923,8 @@ impl ChainSync {
	/// Generic packet sender
	fn send_packet(&mut self, sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) {
		if let Err(e) = sync.send(peer_id, packet_id, packet) {
			warn!(target:"sync", "Error sending packet: {:?}", e);
			debug!(target:"sync", "Error sending packet: {:?}", e);
			sync.disable_peer(peer_id);
			self.on_peer_aborting(sync, peer_id);
		}
	}
	/// Called when peer sends us new transactions
@@ -933,9 +933,11 @@ impl ChainSync {
		let item_count = r.item_count();
		trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count);
		let fetch_latest_nonce = |a : &Address| chain.nonce(a);

		let mut transaction_queue = self.transaction_queue.lock().unwrap();
		for i in 0..item_count {
			let tx: SignedTransaction = try!(r.val_at(i));
			self.transaction_queue.lock().unwrap().add(tx, &fetch_latest_nonce);
			let _ = transaction_queue.add(tx, &fetch_latest_nonce);
		}
		Ok(())
	}
@@ -1246,26 +1248,25 @@ impl ChainSync {
		sent
	}

	fn propagate_latest_blocks(&mut self, io: &mut SyncIo) {
		let chain_info = io.chain().chain_info();
		if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
			let blocks = self.propagate_blocks(&chain_info, io);
			let hashes = self.propagate_new_hashes(&chain_info, io);
			if blocks != 0 || hashes != 0 {
				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
			}
		}
		self.last_sent_block_number = chain_info.best_block_number;
	}

	/// Maintain other peers. Send out any new blocks and transactions
	pub fn maintain_sync(&mut self, io: &mut SyncIo) {
		self.check_resume(io);
	}

	/// should be called once chain has new block, triggers the latest block propagation
	pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) {
		let chain = io.chain().chain_info();
		if (((chain.best_block_number as i64) - (self.last_send_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
			let blocks = self.propagate_blocks(&chain, io);
			let hashes = self.propagate_new_hashes(&chain, io);
			if blocks != 0 || hashes != 0 {
				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
			}
		}
		self.last_send_block_number = chain.best_block_number;
	}

	/// called when block is imported to chain, updates transactions queue
	pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], retracted: &[H256]) {
	/// called when block is imported to chain, updates transactions queue and propagates the blocks
	pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) {
		fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
			let block = chain
				.block(BlockId::Hash(hash.clone()))
@@ -1276,23 +1277,37 @@ impl ChainSync {
		}


		let chain = io.chain();
		let good = good.par_iter().map(|h| fetch_transactions(chain, h));
		let retracted = retracted.par_iter().map(|h| fetch_transactions(chain, h));
		{
			let chain = io.chain();
			let good = good.par_iter().map(|h| fetch_transactions(chain, h));
			let bad = bad.par_iter().map(|h| fetch_transactions(chain, h));

		good.for_each(|txs| {
			let mut transaction_queue = self.transaction_queue.lock().unwrap();
			let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
			transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
		});
		retracted.for_each(|txs| {
			// populate sender
			for tx in &txs {
				let _sender = tx.sender();
			}
			let mut transaction_queue = self.transaction_queue.lock().unwrap();
			transaction_queue.add_all(txs, |a| chain.nonce(a));
		});
			good.for_each(|txs| {
				let mut transaction_queue = self.transaction_queue.lock().unwrap();
				let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
				transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
			});
			bad.for_each(|txs| {
				// populate sender
				for tx in &txs {
					let _sender = tx.sender();
				}
				let mut transaction_queue = self.transaction_queue.lock().unwrap();
				let _ = transaction_queue.add_all(txs, |a| chain.nonce(a));
			});
		}

		// Propagate latest blocks
		self.propagate_latest_blocks(io);
		// TODO [todr] propagate transactions?
	}

	/// Add transaction to the transaction queue
	pub fn insert_transaction<T>(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T)
		where T: Fn(&Address) -> U256
	{
		let mut queue = self.transaction_queue.lock().unwrap();
		queue.add(transaction, fetch_nonce);
	}
}

@@ -1632,13 +1647,13 @@ mod tests {
		let retracted_blocks = vec![client.block_hash_delta_minus(1)];

		let mut queue = VecDeque::new();
		let io = TestIo::new(&mut client, &mut queue, None);
		let mut io = TestIo::new(&mut client, &mut queue, None);

		// when
		sync.chain_new_blocks(&io, &[], &good_blocks);
		sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]);
		assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0);
		assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1);
		sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks);
		sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]);

		// then
		let status = sync.transaction_queue.lock().unwrap().status();
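The chain_new_blocks bookkeeping above is easy to lose in the diff noise: transactions found in good (enacted) blocks leave the queue, transactions from bad (retracted) blocks are re-imported so they can be included again. A miniature of just that flow, with hashes simplified to u64 for brevity:

use std::collections::HashSet;

struct TxQueue {
    pending: HashSet<u64>,
}

impl TxQueue {
    fn remove_all(&mut self, hashes: &[u64]) {
        for h in hashes {
            self.pending.remove(h);
        }
    }

    fn add_all(&mut self, hashes: &[u64]) {
        for &h in hashes {
            self.pending.insert(h);
        }
    }
}

fn chain_new_blocks(queue: &mut TxQueue, good_txs: &[u64], bad_txs: &[u64]) {
    queue.remove_all(good_txs); // now on-chain: drop from the queue
    queue.add_all(bad_txs);     // retracted: put back for re-inclusion
}

fn main() {
    let mut q = TxQueue { pending: [1, 2].iter().cloned().collect() };
    chain_new_blocks(&mut q, &[1], &[7]);
    assert!(!q.pending.contains(&1));
    assert!(q.pending.contains(&2) && q.pending.contains(&7));
    println!("pending: {:?}", q.pending);
}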
@@ -15,11 +15,11 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

#![warn(missing_docs)]
#![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))]
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]

// Keeps consistency (all lines with `.clone()`) and is helpful when changing ref to non-ref.
#![cfg_attr(feature="dev", allow(clone_on_copy))]
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]

//! Blockchain sync module
//! Implements ethereum protocol version 63 as specified here:
@@ -72,6 +72,7 @@ mod chain;
mod io;
mod range_collection;
mod transaction_queue;
pub use transaction_queue::TransactionQueue;

#[cfg(test)]
mod tests;
@@ -93,6 +94,12 @@ impl Default for SyncConfig {
	}
}

/// Current sync status
pub trait SyncStatusProvider: Send + Sync {
	/// Get sync status
	fn status(&self) -> SyncStatus;
}

/// Ethereum network protocol handler
pub struct EthSync {
	/// Shared blockchain client. TODO: this should eventually become an IPC endpoint
@@ -114,11 +121,6 @@ impl EthSync {
		sync
	}

	/// Get sync status
	pub fn status(&self) -> SyncStatus {
		self.sync.read().unwrap().status()
	}

	/// Stop sync
	pub fn stop(&mut self, io: &mut NetworkContext<SyncMessage>) {
		self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.deref()));
@@ -128,6 +130,22 @@ impl EthSync {
	pub fn restart(&mut self, io: &mut NetworkContext<SyncMessage>) {
		self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.deref()));
	}

	/// Insert transaction in transaction queue
	pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction) {
		use util::numbers::*;

		let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one();
		let sync = self.sync.write().unwrap();
		sync.insert_transaction(transaction, &nonce_fn);
	}
}

impl SyncStatusProvider for EthSync {
	/// Get sync status
	fn status(&self) -> SyncStatus {
		self.sync.read().unwrap().status()
	}
}

impl NetworkProtocolHandler<SyncMessage> for EthSync {
@@ -154,13 +172,11 @@ impl NetworkProtocolHandler<SyncMessage> for EthSync {

	fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
		match *message {
			SyncMessage::BlockVerified => {
				self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref()));
			SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => {
				let mut sync_io = NetSyncIo::new(io, self.chain.deref());
				self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted);
			},
			SyncMessage::NewChainBlocks { ref good, ref retracted } => {
				let sync_io = NetSyncIo::new(io, self.chain.deref());
				self.sync.write().unwrap().chain_new_blocks(&sync_io, good, retracted);
			}
			_ => {/* Ignore other messages */},
		}
	}
}
@@ -42,6 +42,8 @@ pub trait RangeCollection<K, V> {
	fn remove_head(&mut self, start: &K);
	/// Remove all elements >= `start` in the range that contains `start`
	fn remove_tail(&mut self, start: &K);
	/// Remove all elements >= `start`
	fn remove_from(&mut self, start: &K);
	/// Insert a single item into the collection
	fn insert_item(&mut self, key: K, value: V);
	/// Get an iterator over ranges
@@ -137,6 +139,28 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq +
		}
	}

	/// Remove the element and all following it.
	fn remove_from(&mut self, key: &K) {
		match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
			Ok(index) => { self.drain(.. index + 1); },
			Err(index) => {
				let mut empty = false;
				match self.get_mut(index) {
					Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
						v.truncate((*key - *k).to_usize());
						empty = v.is_empty();
					}
					_ => {}
				}
				if empty {
					self.drain(.. index + 1);
				} else {
					self.drain(.. index);
				}
			},
		}
	}

	/// Remove range elements up to key
	fn remove_head(&mut self, key: &K) {
		if *key == FromUsize::from_usize(0) {
@@ -207,7 +231,7 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq +
}

#[test]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
fn test_range() {
	use std::cmp::{Ordering};

@@ -272,5 +296,17 @@ fn test_range() {
	assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
	r.remove_tail(&2);
	assert_eq!(r.range_iter().next(), None);

	let mut r = ranges.clone();
	r.remove_from(&20);
	assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
	r.remove_from(&17);
	assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal);
	r.remove_from(&15);
	assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
	r.remove_from(&3);
	assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
	r.remove_from(&2);
	assert_eq!(r.range_iter().next(), None);
}

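A worked miniature of the remove_from semantics exercised above: drop every element with key >= start, truncating the range that contains start. One range starting at key 2 holds values for keys 2, 3, 4:

fn main() {
    let mut ranges: Vec<(u32, Vec<char>)> = vec![(2, vec!['b', 'c', 'd'])];
    let start = 3u32;
    if let Some(&mut (k, ref mut v)) = ranges.first_mut() {
        // The range covers keys k .. k + v.len(); keep only keys < start.
        if k <= start && k + v.len() as u32 > start {
            v.truncate((start - k) as usize);
        }
    }
    ranges.retain(|&(_, ref v)| !v.is_empty());
    assert_eq!(ranges, vec![(2, vec!['b'])]); // keys 3 and 4 were removed
    println!("{:?}", ranges);
}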
@@ -129,8 +129,8 @@ fn propagate_hashes() {

	net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
	net.sync();
	net.trigger_block_verified(0); //first event just sets the marker
	net.trigger_block_verified(0);
	net.trigger_chain_new_blocks(0); //first event just sets the marker
	net.trigger_chain_new_blocks(0);

	// 5 peers to sync
	assert_eq!(5, net.peer(0).queue.len());
@@ -154,8 +154,8 @@ fn propagate_blocks() {
	net.sync();

	net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
	net.trigger_block_verified(0); //first event just sets the marker
	net.trigger_block_verified(0);
	net.trigger_chain_new_blocks(0); //first event just sets the marker
	net.trigger_chain_new_blocks(0);

	assert!(!net.peer(0).queue.is_empty());
	// NEW_BLOCK_PACKET
@@ -25,6 +25,7 @@ use ethcore::receipt::Receipt;
use ethcore::transaction::{LocalizedTransaction, Transaction, Action};
use ethcore::filter::Filter;
use ethcore::log_entry::LocalizedLogEntry;
use ethcore::block::ClosedBlock;

pub struct TestBlockChainClient {
	pub blocks: RwLock<HashMap<H256, Bytes>>,
@@ -160,6 +161,14 @@ impl BlockChainClient for TestBlockChainClient {
		unimplemented!();
	}

	fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
		unimplemented!();
	}

	fn submit_seal(&self, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {
		unimplemented!();
	}

	fn block_header(&self, id: BlockId) -> Option<Bytes> {
		self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
	}
@@ -455,8 +464,8 @@ impl TestNet {
		self.peers.iter().all(|p| p.queue.is_empty())
	}

	pub fn trigger_block_verified(&mut self, peer_id: usize) {
	pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
		let mut peer = self.peer_mut(peer_id);
		peer.sync.chain_blocks_verified(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None));
		peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]);
	}
}
@@ -17,6 +17,67 @@
// TODO [todr] - own transactions should have higher priority

//! Transaction Queue
//!
//! TransactionQueue keeps track of all transactions seen by the node (received from other peers) and own transactions
//! and orders them by priority. Top priority transactions are those with low nonce height (difference between
//! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used
//! for comparison (higher gas price = higher priority).
//!
//! # Usage Example
//!
//! ```rust
//! extern crate ethcore_util as util;
//! extern crate ethcore;
//! extern crate ethsync;
//! extern crate rustc_serialize;
//!
//! use util::crypto::KeyPair;
//! use util::hash::Address;
//! use util::numbers::{Uint, U256};
//! use ethsync::TransactionQueue;
//! use ethcore::transaction::*;
//! use rustc_serialize::hex::FromHex;
//!
//! fn main() {
//! 	let key = KeyPair::create().unwrap();
//! 	let t1 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(),
//! 		gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(10) };
//! 	let t2 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(),
//! 		gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(11) };
//!
//! 	let st1 = t1.sign(&key.secret());
//! 	let st2 = t2.sign(&key.secret());
//! 	let default_nonce = |_a: &Address| U256::from(10);
//!
//! 	let mut txq = TransactionQueue::new();
//! 	txq.add(st2.clone(), &default_nonce);
//! 	txq.add(st1.clone(), &default_nonce);
//!
//! 	// Check status
//! 	assert_eq!(txq.status().pending, 2);
//! 	// Check top transactions
//! 	let top = txq.top_transactions(3);
//! 	assert_eq!(top.len(), 2);
//! 	assert_eq!(top[0], st1);
//! 	assert_eq!(top[1], st2);
//!
//! 	// And when a transaction is removed (but the nonce hasn't changed)
//! 	// it will move invalid transactions to future
//! 	txq.remove(&st1.hash(), &default_nonce);
//! 	assert_eq!(txq.status().pending, 0);
//! 	assert_eq!(txq.status().future, 1);
//! 	assert_eq!(txq.top_transactions(3).len(), 0);
//! }
//! ```
//!
//! # Maintaining a valid state
//!
//! 1. Whenever a transaction is imported to the queue, all other transactions from this sender are revalidated in current. It means that they are moved to future and back again (height recalculation & gap filling).
//! 2. Whenever a transaction is removed:
//!    - When it's removed from `future` - all `future` transactions heights are recalculated and then
//!      we check if the transactions should go to `current` (comparing state nonce)
//!    - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated.
//!

use std::cmp::{Ordering};
use std::collections::{HashMap, BTreeSet};
@@ -24,12 +85,20 @@ use util::numbers::{Uint, U256};
use util::hash::{Address, H256};
use util::table::*;
use ethcore::transaction::*;
use ethcore::error::Error;


#[derive(Clone, Debug)]
/// Light structure used to identify a transaction and its order
struct TransactionOrder {
	/// Primary ordering factor. Difference between transaction nonce and expected nonce in state
	/// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5)
	/// High nonce_height = Low priority (processed later)
	nonce_height: U256,
	/// Gas Price of the transaction.
	/// Low gas price = Low priority (processed later)
	gas_price: U256,
	/// Hash to identify associated transaction
	hash: H256,
}

@@ -70,7 +139,7 @@ impl Ord for TransactionOrder {
		let a_gas = self.gas_price;
		let b_gas = b.gas_price;
		if a_gas != b_gas {
			return a_gas.cmp(&b_gas);
			return b_gas.cmp(&a_gas);
		}

		// Compare hashes
@@ -78,14 +147,16 @@ impl Ord for TransactionOrder {
	}
}

/// Verified transaction (with sender)
struct VerifiedTransaction {
	transaction: SignedTransaction
}
impl VerifiedTransaction {
	fn new(transaction: SignedTransaction) -> Self {
		VerifiedTransaction {
	fn new(transaction: SignedTransaction) -> Result<Self, Error> {
		try!(transaction.sender());
		Ok(VerifiedTransaction {
			transaction: transaction
		}
		})
	}

	fn hash(&self) -> H256 {
@@ -101,6 +172,11 @@ impl VerifiedTransaction {
	}
}

/// Holds transactions accessible by (address, nonce) and by priority
///
/// TransactionSet keeps the number of entries below a limit, but this doesn't
/// happen automatically during `insert/remove` operations.
/// You have to call `enforce_limit` to remove the lowest priority transactions from the set.
struct TransactionSet {
	by_priority: BTreeSet<TransactionOrder>,
	by_address: Table<Address, U256, TransactionOrder>,
@@ -108,30 +184,37 @@ struct TransactionSet {
}

impl TransactionSet {
	fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) {
	/// Inserts a `TransactionOrder` into this set
	fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option<TransactionOrder> {
		self.by_priority.insert(order.clone());
		self.by_address.insert(sender, nonce, order);
		self.by_address.insert(sender, nonce, order)
	}

	fn enforce_limit(&mut self, by_hash: &HashMap<H256, VerifiedTransaction>) {
	/// Removes low priority transactions if there are more than specified by the given `limit`.
	///
	/// It drops transactions from this set but also removes the associated `VerifiedTransaction`.
	fn enforce_limit(&mut self, by_hash: &mut HashMap<H256, VerifiedTransaction>) {
		let len = self.by_priority.len();
		if len <= self.limit {
			return;
		}

		let to_drop : Vec<&VerifiedTransaction> = {
		let to_drop : Vec<(Address, U256)> = {
			self.by_priority
				.iter()
				.skip(self.limit)
				.map(|order| by_hash.get(&order.hash).expect("Inconsistency in queue detected."))
				.map(|tx| (tx.sender(), tx.nonce()))
				.collect()
		};

		for tx in to_drop {
			self.drop(&tx.sender(), &tx.nonce());
		for (sender, nonce) in to_drop {
			let order = self.drop(&sender, &nonce).expect("Dropping transaction found in priority queue failed.");
			by_hash.remove(&order.hash).expect("Inconsistency in queue.");
		}
	}

	/// Drop transaction from this set (remove from `by_priority` and `by_address`)
	fn drop(&mut self, sender: &Address, nonce: &U256) -> Option<TransactionOrder> {
		if let Some(tx_order) = self.by_address.remove(sender, nonce) {
			self.by_priority.remove(&tx_order);
@@ -140,12 +223,15 @@ impl TransactionSet {
		None
	}

	/// Drop all transactions.
	fn clear(&mut self) {
		self.by_priority.clear();
		self.by_address.clear();
	}
}

// Will be used when rpc merged
#[allow(dead_code)]
#[derive(Debug)]
/// Current status of the queue
pub struct TransactionQueueStatus {
@@ -194,6 +280,8 @@ impl TransactionQueue {
		}
	}

	// Will be used when rpc merged
	#[allow(dead_code)]
	/// Returns current status for this queue
	pub fn status(&self) -> TransactionQueueStatus {
		TransactionQueueStatus {
@@ -203,17 +291,19 @@ impl TransactionQueue {
	}

	/// Adds all signed transactions to queue to be verified and imported
	pub fn add_all<T>(&mut self, txs: Vec<SignedTransaction>, fetch_nonce: T)
	pub fn add_all<T>(&mut self, txs: Vec<SignedTransaction>, fetch_nonce: T) -> Result<(), Error>
		where T: Fn(&Address) -> U256 {
		for tx in txs.into_iter() {
			self.add(tx, &fetch_nonce);
			try!(self.add(tx, &fetch_nonce));
		}
		Ok(())
	}

	/// Add signed transaction to queue to be verified and imported
	pub fn add<T>(&mut self, tx: SignedTransaction, fetch_nonce: &T)
	pub fn add<T>(&mut self, tx: SignedTransaction, fetch_nonce: &T) -> Result<(), Error>
		where T: Fn(&Address) -> U256 {
		self.import_tx(VerifiedTransaction::new(tx), fetch_nonce);
		self.import_tx(try!(VerifiedTransaction::new(tx)), fetch_nonce);
		Ok(())
	}

	/// Removes all transactions identified by hashes given in slice
@@ -236,26 +326,60 @@ impl TransactionQueue {
			// We don't know this transaction
			return;
		}

		let transaction = transaction.unwrap();
		let sender = transaction.sender();
		let nonce = transaction.nonce();
		let current_nonce = fetch_nonce(&sender);

		// Remove from future
		self.future.drop(&sender, &nonce);

		// Remove from current
		let order = self.current.drop(&sender, &nonce);
		if order.is_none() {
		let order = self.future.drop(&sender, &nonce);
		if order.is_some() {
			self.update_future(&sender, current_nonce);
			// And now let's check if there is some chain of transactions in future
			// that should be placed in current
			self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce);
			return;
		}

		// Let's remove transactions where tx.nonce < current_nonce
		// and if there are any future transactions matching current_nonce+1 - move to current
		let current_nonce = fetch_nonce(&sender);
		// We will either move transaction to future or remove it completely
		// so there will be no transactions from this sender in current
		self.last_nonces.remove(&sender);
		// Remove from current
		let order = self.current.drop(&sender, &nonce);
		if order.is_some() {
			// We will either move transaction to future or remove it completely
			// so there will be no transactions from this sender in current
			self.last_nonces.remove(&sender);
			// First update height of transactions in future to avoid collisions
			self.update_future(&sender, current_nonce);
			// This should move all current transactions to future and remove old transactions
			self.move_all_to_future(&sender, current_nonce);
			// And now let's check if there is some chain of transactions in future
			// that should be placed in current. It should also update last_nonces.
			self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce);
			return;
		}
	}

	/// Update height of all transactions in future transactions set.
	fn update_future(&mut self, sender: &Address, current_nonce: U256) {
		// We need to drain all transactions for the current sender from future and reinsert them with updated height
		let all_nonces_from_sender = match self.future.by_address.row(&sender) {
			Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
			None => vec![],
		};
		for k in all_nonces_from_sender {
			let order = self.future.drop(&sender, &k).unwrap();
			if k >= current_nonce {
				self.future.insert(sender.clone(), k, order.update_height(k, current_nonce));
			} else {
				// Remove the transaction completely
				self.by_hash.remove(&order.hash);
			}
		}
	}

	/// Drop all transactions from given sender from `current`.
	/// Either moves them to `future` or removes them from queue completely.
	fn move_all_to_future(&mut self, sender: &Address, current_nonce: U256) {
		let all_nonces_from_sender = match self.current.by_address.row(&sender) {
			Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
			None => vec![],
@@ -270,16 +394,12 @@ impl TransactionQueue {
				self.by_hash.remove(&order.hash);
			}
		}
		self.future.enforce_limit(&self.by_hash);

		// And now let's check if there is some chain of transactions in future
		// that should be placed in current
		if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) {
			self.last_nonces.insert(sender, new_current_top);
		}
		self.future.enforce_limit(&mut self.by_hash);
	}

	/// Returns top transactions from the queue
	// Will be used when mining merged
	#[allow(dead_code)]
	/// Returns top transactions from the queue ordered by priority.
	pub fn top_transactions(&self, size: usize) -> Vec<SignedTransaction> {
		self.current.by_priority
			.iter()
@@ -297,11 +417,13 @@ impl TransactionQueue {
		self.last_nonces.clear();
	}

	fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) -> Option<U256> {
	/// Checks if there are any transactions in `future` that should actually be promoted to `current`
	/// (because the nonce matches).
	fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) {
		{
			let by_nonce = self.future.by_address.row_mut(&address);
			if let None = by_nonce {
				return None;
				return;
			}
			let mut by_nonce = by_nonce.unwrap();
			while let Some(order) = by_nonce.remove(&current_nonce) {
@@ -314,47 +436,83 @@ impl TransactionQueue {
			}
		}
		self.future.by_address.clear_if_empty(&address);
		// Returns last inserted nonce
		Some(current_nonce - U256::one())
		// Update last inserted nonce
		self.last_nonces.insert(address, current_nonce - U256::one());
	}

	/// Adds VerifiedTransaction to this queue.
	///
	/// Determines if it should be placed in current or future. When a transaction is
	/// imported to `current` it also checks if there are any `future` transactions that should be promoted because of
	/// this.
	///
	/// It ignores transactions that have already been imported (same `hash`) and replaces the transaction
	/// iff `(address, nonce)` is the same but `gas_price` is higher.
	fn import_tx<T>(&mut self, tx: VerifiedTransaction, fetch_nonce: &T)
		where T: Fn(&Address) -> U256 {
		let nonce = tx.nonce();
		let address = tx.sender();

		if self.by_hash.get(&tx.hash()).is_some() {
			// Transaction is already imported.
			trace!(target: "sync", "Dropping already imported transaction with hash: {:?}", tx.hash());
			return;
		}

		let address = tx.sender();
		let nonce = tx.nonce();

		let state_nonce = fetch_nonce(&address);
		let next_nonce = self.last_nonces
			.get(&address)
			.cloned()
			.map_or_else(|| fetch_nonce(&address), |n| n + U256::one());
			.map_or(state_nonce, |n| n + U256::one());

		// Check height
		if nonce > next_nonce {
			let order = TransactionOrder::for_transaction(&tx, next_nonce);
			// Insert to by_hash
			self.by_hash.insert(tx.hash(), tx);
			// We have a gap - put to future
			self.future.insert(address, nonce, order);
			self.future.enforce_limit(&self.by_hash);
			Self::replace_transaction(tx, next_nonce, &mut self.future, &mut self.by_hash);
			self.future.enforce_limit(&mut self.by_hash);
			return;
		} else if next_nonce > nonce {
		} else if nonce < state_nonce {
			// Dropping transaction
			trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce);
			return;
		}

		let base_nonce = fetch_nonce(&address);
		let order = TransactionOrder::for_transaction(&tx, base_nonce);
		// Insert to by_hash
		self.by_hash.insert(tx.hash(), tx);

		// Insert to current
		self.current.insert(address.clone(), nonce, order);
		Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash);
		self.last_nonces.insert(address.clone(), nonce);
		// But maybe there are some more items waiting in future?
		let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce);
		self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce));
		// Enforce limit
		self.current.enforce_limit(&self.by_hash);
		self.move_matching_future_to_current(address.clone(), nonce + U256::one(), base_nonce);
		self.current.enforce_limit(&mut self.by_hash);
	}

	/// Replaces a transaction in the given set (could be `future` or `current`).
	///
	/// If there is already a transaction with the same `(sender, nonce)` it will be replaced iff `gas_price` is higher.
	/// One of the transactions is dropped from the set and also removed from the queue entirely (from `by_hash`).
	fn replace_transaction(tx: VerifiedTransaction, base_nonce: U256, set: &mut TransactionSet, by_hash: &mut HashMap<H256, VerifiedTransaction>) {
		let order = TransactionOrder::for_transaction(&tx, base_nonce);
		let hash = tx.hash();
		let address = tx.sender();
		let nonce = tx.nonce();

		by_hash.insert(hash.clone(), tx);
		if let Some(old) = set.insert(address, nonce, order.clone()) {
			// There was already a transaction in the queue. Let's check which one should stay
			let old_fee = old.gas_price;
			let new_fee = order.gas_price;
			if old_fee.cmp(&new_fee) == Ordering::Greater {
				// Put back the old transaction since it has greater priority (higher gas_price)
				set.by_address.insert(address, nonce, old);
				// and remove the new one
				set.by_priority.remove(&order);
				by_hash.remove(&hash);
			} else {
				// Make sure we remove the old transaction entirely
				set.by_priority.remove(&old);
				by_hash.remove(&old.hash);
			}
		}
	}
}

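A miniature of the replace_transaction logic above: one (sender, nonce) slot, and the incoming transaction replaces the old one unless the old one bids strictly more gas_price, mirroring the `Ordering::Greater` check. Types are deliberately simplified (u8 sender, u64 nonce and hash):

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct Order {
    gas_price: u64,
    hash: u64,
}

fn replace(slot: &mut HashMap<(u8, u64), Order>, key: (u8, u64), new: Order) {
    match slot.get(&key).cloned() {
        // Old transaction pays strictly more: the newcomer is dropped.
        Some(ref old) if old.gas_price > new.gas_price => {}
        _ => {
            slot.insert(key, new);
        }
    }
}

fn main() {
    let mut by_address = HashMap::new();
    replace(&mut by_address, (1, 0), Order { gas_price: 100, hash: 0xa });
    replace(&mut by_address, (1, 0), Order { gas_price: 200, hash: 0xb });
    replace(&mut by_address, (1, 0), Order { gas_price: 150, hash: 0xc });
    assert_eq!(by_address[&(1, 0)].hash, 0xb); // highest bidder kept
    println!("kept: {:?}", by_address[&(1, 0)]);
}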
@@ -362,12 +520,8 @@ impl TransactionQueue {
#[cfg(test)]
mod test {
	extern crate rustc_serialize;
	use self::rustc_serialize::hex::FromHex;
	use std::collections::{HashMap, BTreeSet};
	use util::crypto::KeyPair;
	use util::numbers::{U256, Uint};
	use util::hash::{Address};
	use util::table::*;
	use util::*;
	use ethcore::transaction::*;
	use super::*;
	use super::{TransactionSet, TransactionOrder, VerifiedTransaction};
@@ -411,12 +565,12 @@ mod test {
			limit: 1
		};
		let (tx1, tx2) = new_txs(U256::from(1));
		let tx1 = VerifiedTransaction::new(tx1);
		let tx2 = VerifiedTransaction::new(tx2);
		let by_hash = {
		let tx1 = VerifiedTransaction::new(tx1).unwrap();
		let tx2 = VerifiedTransaction::new(tx2).unwrap();
		let mut by_hash = {
			let mut x = HashMap::new();
			let tx1 = VerifiedTransaction::new(tx1.transaction.clone());
			let tx2 = VerifiedTransaction::new(tx2.transaction.clone());
			let tx1 = VerifiedTransaction::new(tx1.transaction.clone()).unwrap();
			let tx2 = VerifiedTransaction::new(tx2.transaction.clone()).unwrap();
			x.insert(tx1.hash(), tx1);
			x.insert(tx2.hash(), tx2);
			x
@@ -430,9 +584,10 @@ mod test {
		assert_eq!(set.by_address.len(), 2);

		// when
		set.enforce_limit(&by_hash);
		set.enforce_limit(&mut by_hash);

		// then
		assert_eq!(by_hash.len(), 1);
		assert_eq!(set.by_priority.len(), 1);
		assert_eq!(set.by_address.len(), 1);
		assert_eq!(set.by_priority.iter().next().unwrap().clone(), order1);
@@ -449,13 +604,39 @@ mod test {
		let tx = new_tx();

		// when
		txq.add(tx, &default_nonce);
		let res = txq.add(tx, &default_nonce);

		// then
		assert!(res.is_ok());
		let stats = txq.status();
		assert_eq!(stats.pending, 1);
	}

	#[test]
	fn should_reject_incorectly_signed_transaction() {
		// given
		let mut txq = TransactionQueue::new();
		let tx = new_unsigned_tx(U256::from(123));
		let stx = {
			let mut s = RlpStream::new_list(9);
			s.append(&tx.nonce);
			s.append(&tx.gas_price);
			s.append(&tx.gas);
			s.append_empty_data(); // action=create
			s.append(&tx.value);
			s.append(&tx.data);
			s.append(&0u64); // v
			s.append(&U256::zero()); // r
			s.append(&U256::zero()); // s
			decode(s.as_raw())
		};
		// when
		let res = txq.add(stx, &default_nonce);

		// then
		assert!(res.is_err());
	}

	#[test]
	fn should_import_txs_from_same_sender() {
		// given
@@ -464,8 +645,8 @@ mod test {
		let (tx, tx2) = new_txs(U256::from(1));

		// when
		txq.add(tx.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx.clone(), &default_nonce).unwrap();
		txq.add(tx2.clone(), &default_nonce).unwrap();

		// then
		let top = txq.top_transactions(5);
@@ -482,8 +663,8 @@ mod test {
		let (tx, tx2) = new_txs(U256::from(2));

		// when
		txq.add(tx.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx.clone(), &default_nonce).unwrap();
		txq.add(tx2.clone(), &default_nonce).unwrap();

		// then
		let stats = txq.status();
@@ -494,6 +675,28 @@ mod test {
		assert_eq!(top[0], tx);
	}

	#[test]
	fn should_correctly_update_futures_when_removing() {
		// given
		let prev_nonce = |a: &Address| default_nonce(a) - U256::one();
		let next2_nonce = |a: &Address| default_nonce(a) + U256::from(2);

		let mut txq = TransactionQueue::new();

		let (tx, tx2) = new_txs(U256::from(1));
		txq.add(tx.clone(), &prev_nonce);
		txq.add(tx2.clone(), &prev_nonce);
		assert_eq!(txq.status().future, 2);

		// when
		txq.remove(&tx.hash(), &next2_nonce);
		// should remove both transactions since they are not valid

		// then
		assert_eq!(txq.status().pending, 0);
		assert_eq!(txq.status().future, 0);
	}

	#[test]
	fn should_move_transactions_if_gap_filled() {
		// given
@@ -504,13 +707,13 @@ mod test {
		let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret);
		let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret);

		txq.add(tx, &default_nonce);
		txq.add(tx, &default_nonce).unwrap();
		assert_eq!(txq.status().pending, 1);
		txq.add(tx2, &default_nonce);
		txq.add(tx2, &default_nonce).unwrap();
		assert_eq!(txq.status().future, 1);

		// when
		txq.add(tx1, &default_nonce);
		txq.add(tx1, &default_nonce).unwrap();

		// then
		let stats = txq.status();
@@ -523,8 +726,8 @@ mod test {
		// given
		let mut txq2 = TransactionQueue::new();
		let (tx, tx2) = new_txs(U256::from(3));
		txq2.add(tx.clone(), &default_nonce);
		txq2.add(tx2.clone(), &default_nonce);
		txq2.add(tx.clone(), &default_nonce).unwrap();
		txq2.add(tx2.clone(), &default_nonce).unwrap();
		assert_eq!(txq2.status().pending, 1);
		assert_eq!(txq2.status().future, 1);

@@ -545,10 +748,10 @@ mod test {
		let mut txq = TransactionQueue::new();
		let (tx, tx2) = new_txs(U256::from(1));
		let tx3 = new_tx();
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().future, 1);
		txq.add(tx3.clone(), &default_nonce);
		txq.add(tx.clone(), &default_nonce);
		txq.add(tx3.clone(), &default_nonce).unwrap();
		txq.add(tx.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().pending, 3);

		// when
@@ -567,8 +770,8 @@ mod test {
		let (tx, tx2) = new_txs(U256::one());

		// add
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce).unwrap();
		txq.add(tx.clone(), &default_nonce).unwrap();
		let stats = txq.status();
		assert_eq!(stats.pending, 2);

@@ -585,11 +788,11 @@ mod test {
		// given
		let mut txq = TransactionQueue::with_limits(1, 1);
		let (tx, tx2) = new_txs(U256::one());
		txq.add(tx.clone(), &default_nonce);
		txq.add(tx.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().pending, 1);

		// when
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce).unwrap();

		// then
		let t = txq.top_transactions(2);
@@ -603,14 +806,14 @@ mod test {
		let mut txq = TransactionQueue::with_limits(10, 1);
		let (tx1, tx2) = new_txs(U256::from(4));
		let (tx3, tx4) = new_txs(U256::from(4));
		txq.add(tx1.clone(), &default_nonce);
		txq.add(tx3.clone(), &default_nonce);
		txq.add(tx1.clone(), &default_nonce).unwrap();
		txq.add(tx3.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().pending, 2);

		// when
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().future, 1);
		txq.add(tx4.clone(), &default_nonce);
		txq.add(tx4.clone(), &default_nonce).unwrap();

		// then
		assert_eq!(txq.status().future, 1);
@@ -624,7 +827,7 @@ mod test {
		let fetch_last_nonce = |_a: &Address| last_nonce;

		// when
		txq.add(tx, &fetch_last_nonce);
		txq.add(tx, &fetch_last_nonce).unwrap();

		// then
		let stats = txq.status();
@@ -633,19 +836,38 @@ mod test {
	}

	#[test]
	fn should_accept_same_transaction_twice() {
	fn should_not_insert_same_transaction_twice() {
		// given
		let nonce = |a: &Address| default_nonce(a) + U256::one();
		let mut txq = TransactionQueue::new();
		let (_tx1, tx2) = new_txs(U256::from(1));
		txq.add(tx2.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().future, 1);
		assert_eq!(txq.status().pending, 0);

		// when
		txq.add(tx2.clone(), &nonce).unwrap();

		// then
		let stats = txq.status();
		assert_eq!(stats.future, 1);
		assert_eq!(stats.pending, 0);
	}

	#[test]
	fn should_accept_same_transaction_twice_if_removed() {
		// given
		let mut txq = TransactionQueue::new();
		let (tx1, tx2) = new_txs(U256::from(1));
		txq.add(tx1.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx1.clone(), &default_nonce).unwrap();
		txq.add(tx2.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().pending, 2);

		// when
		txq.remove(&tx1.hash(), &default_nonce);
		assert_eq!(txq.status().pending, 0);
		assert_eq!(txq.status().future, 1);
		txq.add(tx1.clone(), &default_nonce);
		txq.add(tx1.clone(), &default_nonce).unwrap();

		// then
		let stats = txq.status();
@@ -660,10 +882,10 @@ mod test {
		let mut txq = TransactionQueue::new();
		let (tx, tx2) = new_txs(U256::from(1));
		let tx3 = new_tx();
		txq.add(tx2.clone(), &default_nonce);
		txq.add(tx2.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().future, 1);
		txq.add(tx3.clone(), &default_nonce);
		txq.add(tx.clone(), &default_nonce);
		txq.add(tx3.clone(), &default_nonce).unwrap();
		txq.add(tx.clone(), &default_nonce).unwrap();
		assert_eq!(txq.status().pending, 3);

		// when
@@ -675,4 +897,76 @@ mod test {
		assert_eq!(stats.pending, 2);
	}

	#[test]
	fn should_replace_same_transaction_when_has_higher_fee() {
		// given
		let mut txq = TransactionQueue::new();
		let keypair = KeyPair::create().unwrap();
		let tx = new_unsigned_tx(U256::from(123)).sign(&keypair.secret());
		let tx2 = {
			let mut tx2 = tx.deref().clone();
			tx2.gas_price = U256::from(200);
			tx2.sign(&keypair.secret())
		};

		// when
		txq.add(tx, &default_nonce).unwrap();
		txq.add(tx2, &default_nonce).unwrap();

		// then
		let stats = txq.status();
		assert_eq!(stats.pending, 1);
		assert_eq!(stats.future, 0);
		assert_eq!(txq.top_transactions(1)[0].gas_price, U256::from(200));
	}

	#[test]
	fn should_replace_same_transaction_when_importing_to_futures() {
		// given
		let mut txq = TransactionQueue::new();
		let keypair = KeyPair::create().unwrap();
		let tx0 = new_unsigned_tx(U256::from(123)).sign(&keypair.secret());
		let tx1 = {
			let mut tx1 = tx0.deref().clone();
			tx1.nonce = U256::from(124);
			tx1.sign(&keypair.secret())
		};
		let tx2 = {
			let mut tx2 = tx1.deref().clone();
			tx2.gas_price = U256::from(200);
			tx2.sign(&keypair.secret())
|
||||
};
|
||||
|
||||
// when
|
||||
txq.add(tx1, &default_nonce).unwrap();
|
||||
txq.add(tx2, &default_nonce).unwrap();
|
||||
assert_eq!(txq.status().future, 1);
|
||||
txq.add(tx0, &default_nonce).unwrap();
|
||||
|
||||
// then
|
||||
let stats = txq.status();
|
||||
assert_eq!(stats.future, 0);
|
||||
assert_eq!(stats.pending, 2);
|
||||
assert_eq!(txq.top_transactions(2)[1].gas_price, U256::from(200));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_recalculate_height_when_removing_from_future() {
|
||||
// given
|
||||
let previous_nonce = |a: &Address| default_nonce(a) - U256::one();
|
||||
let next_nonce = |a: &Address| default_nonce(a) + U256::one();
|
||||
let mut txq = TransactionQueue::new();
|
||||
let (tx1, tx2) = new_txs(U256::one());
|
||||
txq.add(tx1.clone(), &previous_nonce).unwrap();
|
||||
txq.add(tx2, &previous_nonce).unwrap();
|
||||
assert_eq!(txq.status().future, 2);
|
||||
|
||||
// when
|
||||
txq.remove(&tx1.hash(), &next_nonce);
|
||||
|
||||
// then
|
||||
let stats = txq.status();
|
||||
assert_eq!(stats.future, 0);
|
||||
assert_eq!(stats.pending, 1);
|
||||
}
|
||||
}
|
||||
|
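Taken together, the tests above pin down the queue's observable contract: a transaction with a nonce gap parks in `future`, filling the gap promotes it to `pending`, and a same-nonce transaction only replaces an existing one when it pays a higher gas price. A minimal sketch in the same style, reusing this module's own test helpers (`new_txs`, `default_nonce`) rather than any public API:

	#[test]
	fn gap_then_fill_sketch() {
		// Illustrative only; mirrors the assertions made above.
		let mut txq = TransactionQueue::new();
		let (tx1, tx2) = new_txs(U256::one()); // consecutive nonces

		txq.add(tx2.clone(), &default_nonce).unwrap(); // nonce gap -> `future`
		assert_eq!(txq.status().future, 1);
		assert_eq!(txq.status().pending, 0);

		txq.add(tx1.clone(), &default_nonce).unwrap(); // gap filled -> both `pending`
		assert_eq!(txq.status().pending, 2);
		assert_eq!(txq.status().future, 0);
	}

Note that `add` now returns a `Result`, which is why every call site in this diff gains an `.unwrap()`.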
@ -40,7 +40,7 @@ chrono = "0.2"

[features]
default = []
dev = ["clippy"]
dev = []

[build-dependencies]
vergen = "*"
@ -1103,7 +1103,7 @@ macro_rules! construct_uint {
		}
	}

	#[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok.
	#[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok.
	impl Hash for $name {
		fn hash<H>(&self, state: &mut H) where H: Hasher {
			unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); }
@ -1485,7 +1485,7 @@ mod tests {
	}

	#[test]
	#[cfg_attr(feature="dev", allow(eq_op))]
	#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))]
	pub fn uint256_comp_test() {
		let small = U256([10u64, 0, 0, 0]);
		let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]);
@ -2032,7 +2032,7 @@ mod tests {

	#[test]
	#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
	fn u256_multi_full_mul() {
		let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0]));
		assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result);
@ -1,7 +1,28 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

extern crate rustc_version;
extern crate vergen;

use vergen::*;
use rustc_version::{version_meta, Channel};

fn main() {
	vergen(OutputFns::all()).unwrap();
	if let Channel::Nightly = version_meta().channel {
		println!("cargo:rustc-cfg=nightly");
	}
}
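This new build script is the other half of the `cfg_attr` changes throughout this commit: on a nightly compiler it emits `cargo:rustc-cfg=nightly`, which makes a `nightly` cfg flag visible while the crate compiles. Lint attributes can then be gated so they only apply on nightly with the `dev` feature, as in this pattern used across the diff (the function name here is illustrative):

	// The clippy `allow` applies only when built on nightly with --features dev.
	#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
	fn example() { /* ... */ }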
@ -305,7 +305,7 @@ macro_rules! impl_hash {
	}

	impl Copy for $from {}
	#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
	#[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))]
	impl Clone for $from {
		fn clone(&self) -> $from {
			unsafe {
@ -637,7 +637,7 @@ mod tests {
	use std::str::FromStr;

	#[test]
	#[cfg_attr(feature="dev", allow(eq_op))]
	#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))]
	fn hash() {
		let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
		assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
@ -153,7 +153,7 @@ struct UserTimer {
pub struct IoManager<Message> where Message: Send + Sync {
	timers: Arc<RwLock<HashMap<HandlerId, UserTimer>>>,
	handlers: Vec<Arc<IoHandler<Message>>>,
	_workers: Vec<Worker>,
	workers: Vec<Worker>,
	worker_channel: chase_lev::Worker<Work<Message>>,
	work_ready: Arc<Condvar>,
}
@ -180,7 +180,7 @@ impl<Message> IoManager<Message> where Message: Send + Sync + Clone + 'static {
			timers: Arc::new(RwLock::new(HashMap::new())),
			handlers: Vec::new(),
			worker_channel: worker,
			_workers: workers,
			workers: workers,
			work_ready: work_ready,
		};
		try!(event_loop.run(&mut io));
@ -230,7 +230,10 @@ impl<Message> Handler for IoManager<Message> where Message: Send + Clone + Sync

	fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: Self::Message) {
		match msg {
			IoMessage::Shutdown => event_loop.shutdown(),
			IoMessage::Shutdown => {
				self.workers.clear();
				event_loop.shutdown();
			},
			IoMessage::AddHandler { handler } => {
				let handler_id = {
					self.handlers.push(handler.clone());
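The `Shutdown` arm now clears `workers` before stopping the event loop, which is why `_workers` loses its underscore: the field is no longer just keeping handles alive, it is dropped at a deliberate point. A generic sketch of that pattern, with a hypothetical `Worker` whose `Drop` joins its thread (the real `Worker` type is not shown in this diff):

	use std::thread;

	struct Worker {
		handle: Option<thread::JoinHandle<()>>,
	}

	impl Drop for Worker {
		fn drop(&mut self) {
			// Hypothetical: join the worker thread when the handle is dropped.
			if let Some(h) = self.handle.take() {
				let _ = h.join();
			}
		}
	}

	// Clearing the vector drops every Worker at a well-defined point,
	// mirroring `self.workers.clear()` in the Shutdown arm above.
	fn shutdown(workers: &mut Vec<Worker>) {
		workers.clear();
	}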
@ -25,12 +25,9 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
use std::env;

/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
/// and, possibly, latent-removal semantics.
/// and latent-removal semantics.
///
/// If `counters` is `None`, then it behaves exactly like OverlayDB. If not it behaves
/// differently:
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
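The latent-removal behaviour is easiest to see end to end. A minimal sketch, assuming the API exercised by the tests later in this diff (`insert`, `remove`, `commit`, `exists`, `JournalDB::new_temp`) and a history length of one era:

	#[test]
	fn latent_removal_sketch() {
		// Illustrative; mirrors the `reopen`/`reopen_remove` tests below.
		let mut jdb = JournalDB::new_temp();
		let foo = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();

		// `remove` is journalled, not applied: the key survives while its
		// era is still inside the recent-history window.
		jdb.remove(&foo);
		jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.exists(&foo));

		// Once era 1 becomes ancient, the canonical delete is enacted.
		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(!jdb.exists(&foo));
	}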
@ -60,7 +57,6 @@ const DB_VERSION_NO_JOURNAL : u32 = 3 + 256;
const PADDING : [u8; 10] = [ 0u8; 10 ];

impl JournalDB {

	/// Create a new instance from file
	pub fn new(path: &str) -> JournalDB {
		Self::from_prefs(path, true)
@ -149,31 +145,133 @@ impl JournalDB {
		Ok(ret as u32)
	}

	fn morph_key(key: &H256, index: u8) -> Bytes {
		let mut ret = key.bytes().to_owned();
		ret.push(index);
		ret
	}

	// The next three are valid only as long as there is an insert operation of `key` in the journal.
	fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
	fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
	fn is_already_in(backing: &Database, key: &H256) -> bool {
		backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
	}

	fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
		for &(ref h, ref d) in inserts {
			if let Some(c) = counters.get_mut(h) {
				// already counting. increment.
				*c += 1;
				continue;
			}

			// this is the first entry for this node in the journal.
			if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() {
				// already in the backing DB. start counting, and remember it was already in.
				Self::set_already_in(batch, &h);
				counters.insert(h.clone(), 1);
				continue;
			}

			// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
			//Self::reset_already_in(&h);
			assert!(!Self::is_already_in(backing, &h));
			batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?");
		}
	}

	fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap<H256, i32>) {
		trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters);
		for h in inserts {
			if let Some(c) = counters.get_mut(h) {
				// already counting. increment.
				*c += 1;
				continue;
			}

			// this is the first entry for this node in the journal.
			// it is initialised to 1 if it was already in.
			if Self::is_already_in(backing, h) {
				trace!("replay_keys: Key {} was already in!", h);
				counters.insert(h.clone(), 1);
			}
		}
		trace!("replay_keys: (end) counters={:?}", counters);
	}

	fn kill_keys(deletes: Vec<H256>, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
		for h in deletes.into_iter() {
			let mut n: Option<i32> = None;
			if let Some(c) = counters.get_mut(&h) {
				if *c > 1 {
					*c -= 1;
					continue;
				} else {
					n = Some(*c);
				}
			}
			match n {
				Some(i) if i == 1 => {
					counters.remove(&h);
					Self::reset_already_in(batch, &h);
				}
				None => {
					// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
					//assert!(!Self::is_already_in(db, &h));
					batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?");
				}
				_ => panic!("Invalid value in counters: {:?}", n),
			}
		}
	}

	/// Commit all recent insert operations and historical removals from the old era
	/// to the backing database.
	fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
		// journal format:
		// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
		// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
		// [era, n] => [ ... ]

		// TODO: store reclaim_period.

		// when we make a new commit, we journal the inserts and removes.
		// for each end_era that we journaled that we are now passing by,
		// we remove all of its removes assuming it is canonical and all
		// of its inserts otherwise.
		// When we make a new commit, we make a journal of all blocks in the recent history and record
		// all keys that were inserted and deleted. The journal is ordered by era; multiple commits can
		// share the same era. This forms a data structure similar to a queue but whose items are tuples.
		// By the time it comes to remove a tuple from the queue (i.e. when the era passes from recent
		// history into ancient history) only one commit from the tuple is considered canonical. This commit
		// is kept in the main backing database, whereas any others from the same era are reverted.
		//
		// We also keep reference counters for each key inserted in the journal to handle
		// the following cases where key K must not be deleted from the DB when processing removals:
		// Given H is the journal size in eras, 0 <= C <= H.
		// Key K is removed in era A(N) and re-inserted in canonical era B(N + C).
		// Key K is removed in era A(N) and re-inserted in non-canonical era B`(N + C).
		// Key K is added in non-canonical era A'(N) and in canonical era B(N + C).
		// It is possible that a key, properly available in the backing database, be deleted and re-inserted
		// in the recent history queue, yet have both operations in commits that are eventually non-canonical.
		// To prevent the original, and still required, key from being deleted, we maintain a reference count
		// which includes an original key, if any.
		//
		// The semantics of the `counter` are:
		// insert key k:
		//   counter already contains k: count += 1
		//   counter doesn't contain k:
		//     backing db contains k: count = 1
		//     backing db doesn't contain k: insert into backing db, count = 0
		// delete key k:
		//   counter contains k (count is asserted to be non-zero):
		//     count > 1: counter -= 1
		//     count == 1: remove counter
		//     count == 0: remove key from backing db
		//   counter doesn't contain k: remove key from backing db
		//
		// Practically, this means that for each commit block turning from recent to ancient we do the
		// following:
		// is_canonical:
		//   inserts: Ignored (left alone in the backing database).
		//   deletes: Enacted; however, recent history queue is checked for ongoing references. This is
		//            reduced as a preference to deletion from the backing database.
		// !is_canonical:
		//   inserts: Reverted; however, recent history queue is checked for ongoing references. This is
		//            reduced as a preference to deletion from the backing database.
		//   deletes: Ignored (they were never inserted).
		//
		// The counter is increased each time a key is inserted in the journal in the commit. The list of insertions
		// is saved with the era record. When the era becomes end_era and goes out of the journal the counter is decreased
		// and the key is safe to delete.
		// record new commit's details.
		trace!("commit: #{} ({}), end era: {:?}", now, id, end);
@ -183,36 +281,40 @@ impl JournalDB {
			let mut index = 0usize;
			let mut last;

			while {
				let record = try!(self.backing.get({
					let mut r = RlpStream::new_list(3);
					r.append(&now);
					r.append(&index);
					r.append(&&PADDING[..]);
					last = r.drain();
					&last
				}));
				match record {
					Some(r) => {
						assert!(&Rlp::new(&r).val_at::<H256>(0) != id);
						true
					},
					None => false,
				}
			} {
			while try!(self.backing.get({
				let mut r = RlpStream::new_list(3);
				r.append(&now);
				r.append(&index);
				r.append(&&PADDING[..]);
				last = r.drain();
				&last
			})).is_some() {
				index += 1;
			}

			let drained = self.overlay.drain();
			let removes: Vec<H256> = drained
				.iter()
				.filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None})
				.collect();
			let inserts: Vec<(H256, Bytes)> = drained
				.into_iter()
				.filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None })
				.collect();

			let mut r = RlpStream::new_list(3);
			let inserts: Vec<H256> = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect();
			// Increase counter for each inserted key no matter if the block is canonical or not.
			for i in &inserts {
				*counters.entry(i.clone()).or_insert(0) += 1;
			}
			let removes: Vec<H256> = self.overlay.keys().iter().filter(|&(_, &c)| c < 0).map(|(key, _)| key.clone()).collect();
			r.append(id);
			r.append(&inserts);

			// Process the new inserts.
			// We use the inserts for three things. For each:
			// - we place into the backing DB or increment the counter if already in;
			// - we note in the backing db that it was already in;
			// - we write the key into our journal for this block;

			r.begin_list(inserts.len());
			inserts.iter().foreach(|&(k, _)| {r.append(&k);});
			r.append(&removes);
			Self::insert_keys(&inserts, &self.backing, &mut counters, &batch);
			try!(batch.put(&last, r.as_raw()));
			try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
		}
@ -221,8 +323,6 @@ impl JournalDB {
		if let Some((end_era, canon_id)) = end {
			let mut index = 0usize;
			let mut last;
			let mut to_remove: Vec<H256> = Vec::new();
			let mut canon_inserts: Vec<H256> = Vec::new();
			while let Some(rlp_data) = try!(self.backing.get({
				let mut r = RlpStream::new_list(3);
				r.append(&end_era);
@ -232,54 +332,19 @@ impl JournalDB {
				&last
			})) {
				let rlp = Rlp::new(&rlp_data);
				let mut inserts: Vec<H256> = rlp.val_at(1);
				JournalDB::decrease_counters(&inserts, &mut counters);
				let inserts: Vec<H256> = rlp.val_at(1);
				let deletes: Vec<H256> = rlp.val_at(2);
				// Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical
				if canon_id == rlp.val_at(0) {
					let mut canon_deletes: Vec<H256> = rlp.val_at(2);
					trace!("Purging nodes deleted from canon: {:?}", canon_deletes);
					to_remove.append(&mut canon_deletes);
					canon_inserts = inserts;
				}
				else {
					trace!("Purging nodes inserted in non-canon: {:?}", inserts);
					to_remove.append(&mut inserts);
				}
				trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): {} entries", end_era, index, rlp.val_at::<H256>(0), canon_id, to_remove.len());
				Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch);
				try!(batch.delete(&last));
				index += 1;
			}

			let canon_inserts = canon_inserts.drain(..).collect::<HashSet<_>>();
			// Purge removed keys if they are not referenced and not re-inserted in the canon commit
			let mut deletes = 0;
			trace!("Purging filtered nodes: {:?}", to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)).collect::<Vec<_>>());
			for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) {
				try!(batch.delete(&h));
				deletes += 1;
			}
			trace!("Total nodes purged: {}", deletes);
			trace!("JournalDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id);
		}

		// Commit overlay insertions
		let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch);
		try!(self.backing.write(batch));
		Ok(ret as u32)
	}

	// Decrease counters for given keys. Deletes obsolete counters
	fn decrease_counters(keys: &[H256], counters: &mut HashMap<H256, i32>) {
		for i in keys.iter() {
			let delete_counter = {
				let cnt = counters.get_mut(i).expect("Missing key counter");
				*cnt -= 1;
				*cnt == 0
			};
			if delete_counter {
				counters.remove(i);
			}
		}
		// trace!("JournalDB::commit() deleted {} nodes", deletes);
		Ok(0)
	}

	fn payload(&self, key: &H256) -> Option<Bytes> {
@ -287,7 +352,7 @@ impl JournalDB {
	}

	fn read_counters(db: &Database) -> HashMap<H256, i32> {
		let mut res = HashMap::new();
		let mut counters = HashMap::new();
		if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
			let mut era = decode::<u64>(&val);
			loop {
@ -299,11 +364,10 @@ impl JournalDB {
					r.append(&&PADDING[..]);
					&r.drain()
				}).expect("Low-level database error.") {
					trace!("read_counters: era={}, index={}", era, index);
					let rlp = Rlp::new(&rlp_data);
					let to_add: Vec<H256> = rlp.val_at(1);
					for h in to_add {
						*res.entry(h).or_insert(0) += 1;
					}
					let inserts: Vec<H256> = rlp.val_at(1);
					Self::replay_keys(&inserts, db, &mut counters);
					index += 1;
				};
				if index == 0 || era == 0 {
@ -312,13 +376,21 @@ impl JournalDB {
				era -= 1;
			}
		}
		trace!("Recovered {} counters", res.len());
		res
		trace!("Recovered {} counters", counters.len());
		counters
	}
}

	/// Returns heap memory size used
	pub fn mem_used(&self) -> usize {
		self.overlay.mem_used() + match self.counters {
			Some(ref c) => c.read().unwrap().heap_size_of_children(),
			None => 0
		}
	}
}

impl HashDB for JournalDB {
	fn keys(&self) -> HashMap<H256, i32> {
		let mut ret: HashMap<H256, i32> = HashMap::new();
		for (key, _) in self.backing.iter() {
			let h = H256::from_slice(key.deref());
@ -332,7 +404,7 @@ impl HashDB for JournalDB {
		ret
	}

	fn lookup(&self, key: &H256) -> Option<&[u8]> {
		let k = self.overlay.raw(key);
		match k {
			Some(&(ref d, rc)) if rc > 0 => Some(d),
@ -347,18 +419,18 @@ impl HashDB for JournalDB {
		}
	}

	fn exists(&self, key: &H256) -> bool {
		self.lookup(key).is_some()
	}

	fn insert(&mut self, value: &[u8]) -> H256 {
		self.overlay.insert(value)
	}
	fn emplace(&mut self, key: H256, value: Bytes) {
		self.overlay.emplace(key, value);
	}
	fn kill(&mut self, key: &H256) {
		self.overlay.kill(key);
	}
}

@ -368,6 +440,28 @@ mod tests {
	use super::*;
	use hashdb::*;

	#[test]
	fn insert_same_in_fork() {
		// history is 1
		let mut jdb = JournalDB::new_temp();

		let x = jdb.insert(b"X");
		jdb.commit(1, &b"1".sha3(), None).unwrap();
		jdb.commit(2, &b"2".sha3(), None).unwrap();
		jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
		jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();

		jdb.remove(&x);
		jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
		let x = jdb.insert(b"X");
		jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();

		jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
		jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();

		assert!(jdb.exists(&x));
	}

	#[test]
	fn long_history() {
		// history is 3
@ -488,15 +582,18 @@ mod tests {
		assert!(jdb.exists(&foo));
	}

	#[test]
	fn reopen() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());
		let bar = H256::random();

		let foo = {
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			// history is 1
			let foo = jdb.insert(b"foo");
			jdb.emplace(bar.clone(), b"bar".to_vec());
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			foo
		};
@ -510,8 +607,67 @@ mod tests {
		{
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			assert!(jdb.exists(&foo));
			assert!(jdb.exists(&bar));
			jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
			assert!(!jdb.exists(&foo));
		}
	}

	#[test]
	fn reopen_remove() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let foo = {
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			// history is 1
			let foo = jdb.insert(b"foo");
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();

			// foo is ancient history.

			jdb.insert(b"foo");
			jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
			foo
		};

		{
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			jdb.remove(&foo);
			jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
			assert!(jdb.exists(&foo));
			jdb.remove(&foo);
			jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
			jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
			assert!(!jdb.exists(&foo));
		}
	}
	#[test]
	fn reopen_fork() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());
		let (foo, bar, baz) = {
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			// history is 1
			let foo = jdb.insert(b"foo");
			let bar = jdb.insert(b"bar");
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			jdb.remove(&foo);
			let baz = jdb.insert(b"baz");
			jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();

			jdb.remove(&bar);
			jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
			(foo, bar, baz)
		};

		{
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
			assert!(jdb.exists(&foo));
			assert!(!jdb.exists(&baz));
			assert!(!jdb.exists(&bar));
		}
	}
}
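The counter rules documented above in `commit_with_counters` reduce to a small state machine. A self-contained sketch, with plain `HashMap`s standing in for the real overlay and backing database (the function names are illustrative, not part of the crate):

	use std::collections::HashMap;

	type Key = Vec<u8>;
	type Backing = HashMap<Key, Vec<u8>>;

	// "insert key k" from the semantics block in commit_with_counters.
	fn insert_key(k: Key, v: Vec<u8>, backing: &mut Backing, counters: &mut HashMap<Key, i32>) {
		if let Some(c) = counters.get_mut(&k) {
			*c += 1; // counter already contains k: count += 1
			return;
		}
		if backing.contains_key(&k) {
			counters.insert(k, 1); // backing db contains k: count = 1
		} else {
			backing.insert(k, v); // fresh key: write it, no counter (count = 0)
		}
	}

	// "delete key k" from the same semantics block.
	fn delete_key(k: &Key, backing: &mut Backing, counters: &mut HashMap<Key, i32>) {
		match counters.get(k).copied() {
			Some(c) if c > 1 => { counters.insert(k.clone(), c - 1); } // just decrement
			Some(_) => { counters.remove(k); } // count == 1: drop the counter, keep the key
			None => { backing.remove(k); }     // no counter: enact the deletion
		}
	}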
@ -84,6 +84,7 @@ impl SecretStore {
		let mut path = ::std::env::home_dir().expect("Failed to get home dir");
		path.push(".parity");
		path.push("keys");
		::std::fs::create_dir_all(&path).expect("Should panic since it is critical to be able to access home dir");
		Self::new_in(&path)
	}
@ -55,8 +55,7 @@ pub struct DatabaseIterator<'a> {
impl<'a> Iterator for DatabaseIterator<'a> {
	type Item = (Box<[u8]>, Box<[u8]>);

	#[cfg_attr(feature="dev", allow(type_complexity))]
	fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> {
	fn next(&mut self) -> Option<Self::Item> {
		self.iter.next()
	}
}
@ -15,18 +15,18 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

#![warn(missing_docs)]
#![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))]
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]

// Clippy settings
// TODO [todr] not really sure
#![cfg_attr(feature="dev", allow(needless_range_loop))]
#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
// Shorter than if-else
#![cfg_attr(feature="dev", allow(match_bool))]
#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
// We use that to be more explicit about handled cases
#![cfg_attr(feature="dev", allow(match_same_arms))]
#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))]
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
#![cfg_attr(feature="dev", allow(clone_on_copy))]
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]

//! Ethcore-util library
//!
@ -21,6 +21,7 @@ use bytes::*;
use rlp::*;
use sha3::*;
use hashdb::*;
use heapsize::*;
use std::mem;
use std::collections::HashMap;

@ -143,6 +144,11 @@ impl MemoryDB {
		}
		self.raw(key).unwrap()
	}

	/// Returns the size of allocated heap memory
	pub fn mem_used(&self) -> usize {
		self.data.heap_size_of_children()
	}
}

static NULL_RLP_STATIC: [u8; 1] = [0x80; 1];
@ -190,25 +190,25 @@ impl Connection {

	/// Register this connection with the IO event loop.
	pub fn register_socket<Host: Handler>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
		trace!(target: "net", "connection register; token={:?}", reg);
		trace!(target: "network", "connection register; token={:?}", reg);
		if let Err(e) = event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
			debug!("Failed to register {:?}, {:?}", reg, e);
			trace!(target: "network", "Failed to register {:?}, {:?}", reg, e);
		}
		Ok(())
	}

	/// Update connection registration. Should be called at the end of the IO handler.
	pub fn update_socket<Host: Handler>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
		trace!(target: "net", "connection reregister; token={:?}", reg);
		trace!(target: "network", "connection reregister; token={:?}", reg);
		event_loop.reregister( &self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| {
			debug!("Failed to reregister {:?}, {:?}", reg, e);
			trace!(target: "network", "Failed to reregister {:?}, {:?}", reg, e);
			Ok(())
		})
	}

	/// Delete connection registration. Should be called at the end of the IO handler.
	pub fn deregister_socket<Host: Handler>(&self, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
		trace!(target: "net", "connection deregister; token={:?}", self.token);
		trace!(target: "network", "connection deregister; token={:?}", self.token);
		event_loop.deregister(&self.socket).ok(); // ignore errors here
		Ok(())
	}
@ -113,14 +113,14 @@ impl Discovery {
	}

	/// Add a new node to discovery table. Pings the node.
	pub fn add_node(&mut self, e: NodeEntry) {
		let endpoint = e.endpoint.clone();
		self.update_node(e);
		self.ping(&endpoint);
	}

	/// Add a list of known nodes to the table.
	pub fn init_node_list(&mut self, mut nodes: Vec<NodeEntry>) {
		for n in nodes.drain(..) {
			self.update_node(n);
		}
@ -243,7 +243,7 @@ impl Discovery {
		self.send_to(packet, address.clone());
	}

	#[cfg_attr(feature="dev", allow(map_clone))]
	#[cfg_attr(all(nightly, feature="dev"), allow(map_clone))]
	fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec<NodeEntry> {
		let mut found: BTreeMap<u32, Vec<&NodeEntry>> = BTreeMap::new();
		let mut count = 0;
@ -251,7 +251,7 @@ impl Discovery {
		// Sort nodes by distance to target
		for bucket in buckets {
			for node in &bucket.nodes {
				let distance = Discovery::distance(target, &node.address.id);
				found.entry(distance).or_insert_with(Vec::new).push(&node.address);
				if count == BUCKET_SIZE {
					// delete the most distant element
@ -310,7 +310,7 @@ impl Discovery {
				None
			}),
			Ok(_) => None,
			Err(e) => {
				warn!("Error reading UDP socket: {:?}", e);
				None
			}
@ -339,7 +339,7 @@ impl Discovery {
			PACKET_PONG => self.on_pong(&rlp, &node_id, &from),
			PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from),
			PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from),
			_ => {
				debug!("Unknown UDP packet: {}", packet_id);
				Ok(None)
			}
@ -367,14 +367,14 @@ impl Discovery {
		}
		else {
			self.update_node(entry.clone());
			added_map.insert(node.clone(), entry);
		}
		let hash = rlp.as_raw().sha3();
		let mut response = RlpStream::new_list(2);
		dest.to_rlp_list(&mut response);
		response.append(&hash);
		self.send_packet(PACKET_PONG, from, &response.drain());

		Ok(Some(TableUpdates { added: added_map, removed: HashSet::new() }))
	}

@ -391,7 +391,7 @@ impl Discovery {
		}
		self.clear_ping(node);
		let mut added_map = HashMap::new();
		added_map.insert(node.clone(), entry);
		Ok(None)
	}

@ -466,8 +466,8 @@ impl Discovery {
	pub fn round(&mut self) -> Option<TableUpdates> {
		let removed = self.check_expired(false);
		self.discover();
		if !removed.is_empty() {
			Some(TableUpdates { added: HashMap::new(), removed: removed })
		} else { None }
	}
@ -222,7 +222,7 @@ impl Handshake {

	/// Parse, validate and confirm auth message
	fn read_auth(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> {
		trace!(target:"net", "Received handshake auth from {:?}", self.connection.socket.peer_addr());
		trace!(target:"network", "Received handshake auth from {:?}", self.connection.socket.peer_addr());
		if data.len() != V4_AUTH_PACKET_SIZE {
			debug!(target:"net", "Wrong auth packet size");
			return Err(From::from(NetworkError::BadProtocol));
@ -253,7 +253,7 @@ impl Handshake {
	}

	fn read_auth_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> {
		trace!(target:"net", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr());
		trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr());
		self.auth_cipher.extend_from_slice(data);
		let auth = try!(ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..]));
		let rlp = UntrustedRlp::new(&auth);
@ -268,7 +268,7 @@ impl Handshake {

	/// Parse and validate ack message
	fn read_ack(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> {
		trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr());
		trace!(target:"network", "Received handshake auth to {:?}", self.connection.socket.peer_addr());
		if data.len() != V4_ACK_PACKET_SIZE {
			debug!(target:"net", "Wrong ack packet size");
			return Err(From::from(NetworkError::BadProtocol));
@ -296,7 +296,7 @@ impl Handshake {
	}

	fn read_ack_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> {
		trace!(target:"net", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr());
		trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr());
		self.ack_cipher.extend_from_slice(data);
		let ack = try!(ecies::decrypt(secret, &self.ack_cipher[0..2], &self.ack_cipher[2..]));
		let rlp = UntrustedRlp::new(&ack);
@ -309,7 +309,7 @@ impl Handshake {

	/// Sends auth message
	fn write_auth(&mut self, secret: &Secret, public: &Public) -> Result<(), UtilError> {
		trace!(target:"net", "Sending handshake auth to {:?}", self.connection.socket.peer_addr());
		trace!(target:"network", "Sending handshake auth to {:?}", self.connection.socket.peer_addr());
		let mut data = [0u8; /*Signature::SIZE*/ 65 + /*H256::SIZE*/ 32 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32 + 1]; //TODO: use associated constants
		let len = data.len();
		{
@ -336,7 +336,7 @@ impl Handshake {

	/// Sends ack message
	fn write_ack(&mut self) -> Result<(), UtilError> {
		trace!(target:"net", "Sending handshake ack to {:?}", self.connection.socket.peer_addr());
		trace!(target:"network", "Sending handshake ack to {:?}", self.connection.socket.peer_addr());
		let mut data = [0u8; 1 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32]; //TODO: use associated constants
		let len = data.len();
		{
@ -355,7 +355,7 @@ impl Handshake {

	/// Sends EIP8 ack message
	fn write_ack_eip8(&mut self) -> Result<(), UtilError> {
		trace!(target:"net", "Sending EIP8 handshake ack to {:?}", self.connection.socket.peer_addr());
		trace!(target:"network", "Sending EIP8 handshake ack to {:?}", self.connection.socket.peer_addr());
		let mut rlp = RlpStream::new_list(3);
		rlp.append(self.ecdhe.public());
		rlp.append(&self.nonce);
@ -170,29 +170,37 @@ pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, {
	io: &'s IoContext<NetworkIoMessage<Message>>,
	protocol: ProtocolId,
	sessions: Arc<RwLock<Slab<SharedSession>>>,
	session: Option<StreamToken>,
	session: Option<SharedSession>,
	session_id: Option<StreamToken>,
}

impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, {
	/// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler.
	fn new(io: &'s IoContext<NetworkIoMessage<Message>>,
		protocol: ProtocolId,
		session: Option<StreamToken>, sessions: Arc<RwLock<Slab<SharedSession>>>) -> NetworkContext<'s, Message> {
		session: Option<SharedSession>, sessions: Arc<RwLock<Slab<SharedSession>>>) -> NetworkContext<'s, Message> {
		let id = session.as_ref().map(|s| s.lock().unwrap().token());
		NetworkContext {
			io: io,
			protocol: protocol,
			session_id: id,
			session: session,
			sessions: sessions,
		}
	}

	fn resolve_session(&self, peer: PeerId) -> Option<SharedSession> {
		match self.session_id {
			Some(id) if id == peer => self.session.clone(),
			_ => self.sessions.read().unwrap().get(peer).cloned(),
		}
	}

	/// Send a packet over the network to another peer.
	pub fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
		let session = { self.sessions.read().unwrap().get(peer).cloned() };
		let session = self.resolve_session(peer);
		if let Some(session) = session {
			session.lock().unwrap().deref_mut().send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| {
				warn!(target: "network", "Send error: {:?}", e);
			}); //TODO: don't copy vector data
			try!(session.lock().unwrap().deref_mut().send_packet(self.protocol, packet_id as u8, &data));
			try!(self.io.update_registration(peer));
		} else {
			trace!(target: "network", "Send: Peer no longer exist")
@ -200,14 +208,10 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone
		Ok(())
	}

	/// Respond to a current network message. Panics if there is no packet in the context.
	/// Respond to a current network message. Panics if there is no packet in the context. If the session is expired, returns nothing.
	pub fn respond(&self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
		match self.session {
			Some(session) => self.send(session, packet_id, data),
			None => {
				panic!("Respond: Session does not exist")
			}
		}
		assert!(self.session.is_some(), "Respond called without network context");
		self.send(self.session_id.unwrap(), packet_id, data)
	}

	/// Send an IO message
@ -215,7 +219,6 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone
		self.io.message(NetworkIoMessage::User(msg));
	}

	/// Disable current protocol capability for given peer. If no capabilities left peer gets disconnected.
	pub fn disable_peer(&self, peer: PeerId) {
		//TODO: remove capability, disconnect if no capabilities left
@ -239,7 +242,7 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone

	/// Returns peer identification string
	pub fn peer_info(&self, peer: PeerId) -> String {
		let session = { self.sessions.read().unwrap().get(peer).cloned() };
		let session = self.resolve_session(peer);
		if let Some(session) = session {
			return session.lock().unwrap().info.client_version.clone()
		}
@ -504,7 +507,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
		debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count());
	}

	#[cfg_attr(feature="dev", allow(single_match))]
	#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
		if self.have_session(id)
		{
@ -539,7 +542,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
		self.create_connection(socket, Some(id), io);
	}

	#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
	#[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))]
	fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) {
		let nonce = self.info.write().unwrap().next_nonce();
		let mut handshakes = self.handshakes.write().unwrap();
@ -624,7 +627,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
		let mut packet_data: Option<(ProtocolId, PacketId, Vec<u8>)> = None;
		let mut kill = false;
		let session = { self.sessions.read().unwrap().get(token).cloned() };
		if let Some(session) = session {
		if let Some(session) = session.clone() {
			let mut s = session.lock().unwrap();
			match s.readable(io, &self.info.read().unwrap()) {
				Err(e) => {
@ -656,11 +659,11 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
		}
		for p in ready_data {
			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
			h.connected(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token);
			h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone()), &token);
		}
		if let Some((p, packet_id, data)) = packet_data {
			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
			h.read(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token, packet_id, &data[1..]);
			h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone()), &token, packet_id, &data[1..]);
		}
		io.update_registration(token).unwrap_or_else(|e| debug!(target: "network", "Token registration error: {:?}", e));
	}
@ -718,6 +721,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
		let mut to_disconnect: Vec<ProtocolId> = Vec::new();
		let mut failure_id = None;
		let mut deregister = false;
		let mut expired_session = None;
		match token {
			FIRST_HANDSHAKE ... LAST_HANDSHAKE => {
				let handshakes = self.handshakes.write().unwrap();
@ -733,6 +737,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
			FIRST_SESSION ... LAST_SESSION => {
				let sessions = self.sessions.write().unwrap();
				if let Some(session) = sessions.get(token).cloned() {
					expired_session = Some(session.clone());
					let mut s = session.lock().unwrap();
					if !s.expired() {
						if s.is_ready() {
@ -757,7 +762,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
		}
		for p in to_disconnect {
			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
			h.disconnected(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token);
			h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone()), &token);
		}
		if deregister {
			io.deregister_stream(token).expect("Error deregistering stream");
@ -213,6 +213,9 @@ impl Session {

	/// Send a protocol packet to peer.
	pub fn send_packet(&mut self, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), UtilError> {
		if self.expired() {
			return Err(From::from(NetworkError::Expired));
		}
		let mut i = 0usize;
		while protocol != self.info.capabilities[i].protocol {
			i += 1;
@ -351,15 +354,15 @@ impl Session {
			offset += caps[i].packet_count;
			i += 1;
		}
		trace!(target: "net", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps);
		trace!(target: "network", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps);
		self.info.client_version = client_version;
		self.info.capabilities = caps;
		if self.info.capabilities.is_empty() {
			trace!("No common capabilities with peer.");
			trace!(target: "network", "No common capabilities with peer.");
			return Err(From::from(self.disconnect(DisconnectReason::UselessPeer)));
		}
		if protocol != host.protocol_version {
			trace!("Peer protocol version mismatch: {}", protocol);
			trace!(target: "network", "Peer protocol version mismatch: {}", protocol);
			return Err(From::from(self.disconnect(DisconnectReason::UselessPeer)));
		}
		self.had_hello = true;
@ -71,7 +71,7 @@ impl PanicHandler {

	/// Invoke closure and catch any possible panics.
	/// In case of panic notifies all listeners about it.
	#[cfg_attr(feature="dev", allow(deprecated))]
	#[cfg_attr(all(nightly, feature="dev"), allow(deprecated))]
	pub fn catch_panic<G, R>(&self, g: G) -> thread::Result<R> where G: FnOnce() -> R + Send + 'static {
		let _guard = PanicGuard { handler: self };
		let result = g();
@ -22,7 +22,7 @@ use super::trietraits::*;
use super::node::*;

/// A `Trie` implementation using a generic `HashDB` backing database.
///
/// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys`
/// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get
/// which items in the backing database do not belong to this trie. If this is the only trie in the
@ -54,7 +54,7 @@ pub struct TrieDB<'db> {
	pub hash_count: usize,
}

#[cfg_attr(feature="dev", allow(wrong_self_convention))]
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
impl<'db> TrieDB<'db> {
	/// Create a new trie with the backing database `db` and `root`
	/// Panics, if `root` does not exist
@ -63,16 +63,16 @@ impl<'db> TrieDB<'db> {
			flushln!("TrieDB::new({}): Trie root not found!", root);
			panic!("Trie root not found!");
		}
		TrieDB {
			db: db,
			root: root,
			hash_count: 0
		}
	}

	/// Get the backing database.
	pub fn db(&'db self) -> &'db HashDB {
		self.db
	}

	/// Determine all the keys in the backing database that belong to the trie.
@ -142,7 +142,7 @@ impl<'db> TrieDB<'db> {

	/// Indentation helper for `formal_all`.
	fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result {
		for _ in 0..size {
			try!(write!(f, "  "));
		}
		Ok(())
@ -358,7 +358,7 @@ impl<'db> fmt::Debug for TrieDB<'db> {
	fn iterator() {
		use memorydb::*;
		use super::triedbmut::*;

		let d = vec![ &b"A"[..], &b"AA"[..], &b"AB"[..], &b"B"[..] ];

		let mut memdb = MemoryDB::new();
@ -23,7 +23,7 @@ use super::journal::*;
use super::trietraits::*;

/// A `Trie` implementation using a generic `HashDB` backing database.
///
/// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys`
/// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get
/// which items in the backing database do not belong to this trie. If this is the only trie in the
@ -66,21 +66,21 @@ enum MaybeChanged<'a> {
	Changed(Bytes),
}

#[cfg_attr(feature="dev", allow(wrong_self_convention))]
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
impl<'db> TrieDBMut<'db> {
	/// Create a new trie with the backing database `db` and empty `root`
	/// Initialise to the state entailed by the genesis block.
	/// This guarantees the trie is built correctly.
	pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
		let mut r = TrieDBMut{
			db: db,
			root: root,
			hash_count: 0
		};

		// set root rlp
		*r.root = SHA3_NULL_RLP.clone();
		r
	}

	/// Create a new trie with the backing database `db` and `root`.
@ -91,21 +91,21 @@ impl<'db> TrieDBMut<'db> {
			flushln!("Trie root not found {}", root);
			panic!("Trie root not found!");
		}
		TrieDBMut {
			db: db,
			root: root,
			hash_count: 0
		}
	}

	/// Get the backing database.
	pub fn db(&'db self) -> &'db HashDB {
		self.db
	}

	/// Get the backing database.
	pub fn db_mut(&'db mut self) -> &'db mut HashDB {
		self.db
	}

	/// Determine all the keys in the backing database that belong to the trie.
@ -184,7 +184,7 @@ impl<'db> TrieDBMut<'db> {

	/// Indentation helper for `formal_all`.
	fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result {
		for _ in 0..size {
			try!(write!(f, "  "));
		}
		Ok(())
@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> {
		}
	}

	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
	/// Determine the RLP of the node, assuming we're inserting `partial` into the
	/// node currently of data `old`. This will *not* delete any hash of `old` from the database;
	/// it will just return the new RLP that includes the new node.
@ -378,7 +378,7 @@ impl<'db> TrieDBMut<'db> {
			// original had empty slot - place a leaf there.
			true if old_rlp.at(i).is_empty() => journal.new_node(Self::compose_leaf(&partial.mid(1), value), &mut s),
			// original has something there already; augment.
			true => {
				let new = self.augmented(self.take_node(&old_rlp.at(i), journal), &partial.mid(1), value, journal);
				journal.new_node(new, &mut s);
			}