Merge branch 'master' into ethminer_crate
Conflicts:
	Cargo.lock
	Cargo.toml
	ethcore/src/client/client.rs
	hook.sh
	parity/main.rs
	rpc/Cargo.toml
	rpc/src/v1/impls/eth.rs
	sync/Cargo.toml
	sync/src/tests/helpers.rs

commit 03da6c991f

Cargo.lock (generated, 3 changes)
@@ -220,7 +220,6 @@ dependencies = [
 "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
 "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
-"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -245,7 +244,6 @@ dependencies = [
 "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
-"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -314,7 +312,6 @@ dependencies = [
 "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
-"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 

Cargo.toml (16 changes)
@@ -28,21 +28,17 @@ ethcore-devtools = { path = "devtools" }
 ethcore-rpc = { path = "rpc", optional = true }
 clippy = { version = "0.0.44", optional = true }
 
-[dev-dependencies]
-ethcore = { path = "ethcore", features = ["dev"] }
-ethcore-util = { path = "util", features = ["dev"] }
-ethsync = { path = "sync", features = ["dev"] }
-ethcore-rpc = { path = "rpc", features = ["dev"] }
-
 [features]
 default = ["rpc"]
 rpc = ["ethcore-rpc"]
-dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev"]
-dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy",
-"ethminer/clippy"]
+dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev"]
 travis-beta = ["ethcore/json-tests"]
-travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"]
+travis-nightly = ["ethcore/json-tests", "dev"]
 
 [[bin]]
 path = "parity/main.rs"
 name = "parity"
 
+[profile.release]
+debug = false
+lto = false
@@ -5,10 +5,6 @@ license = "GPL-3.0"
 name = "ethcore"
 version = "0.9.99"
 authors = ["Ethcore <admin@ethcore.io>"]
-build = "build.rs"
-
-[build-dependencies]
-rustc_version = "0.1"
 
 [dependencies]
 log = "0.3"
@@ -31,5 +27,5 @@ jit = ["evmjit"]
 evm-debug = []
 json-tests = []
 test-heavy = []
-dev = []
+dev = ["clippy"]
 default = []
@@ -24,7 +24,7 @@ pub type LogBloom = H2048;
 /// Constant 2048-bit datum for 0. Often used as a default.
 pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]);
 
-#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
+#[cfg_attr(feature="dev", allow(enum_variant_names))]
 /// Semantic boolean for when a seal/signature is included.
 pub enum Seal {
 /// The seal/signature is included.
@@ -16,7 +16,7 @@
 
 //! Blockchain block.
 
-#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
+#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
 
 use common::*;
 use engine::*;
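Several hunks in this commit relax the lint gating from `all(nightly, feature="dev")` to just `feature="dev"`, matching the Cargo.toml change above that folds clippy into the dev feature. As a generic illustration of the cfg_attr mechanism only — a hypothetical crate using a built-in lint, not code from this tree:

    // Hypothetical example: with a `dev` entry under [features] in Cargo.toml,
    // building with `--features dev` turns the allow on; a plain build leaves
    // the lint active. The commit uses the same shape with clippy lints.
    #![cfg_attr(feature = "dev", allow(dead_code))]

    fn helper_only_used_in_dev_builds() -> u32 {
        42
    }

    fn main() {
        println!("compiles with or without --features dev");
    }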
@@ -95,7 +95,7 @@ pub struct BlockQueue {
 panic_handler: Arc<PanicHandler>,
 engine: Arc<Box<Engine>>,
 more_to_verify: Arc<Condvar>,
-verification: Arc<Mutex<Verification>>,
+verification: Arc<Verification>,
 verifiers: Vec<JoinHandle<()>>,
 deleting: Arc<AtomicBool>,
 ready_signal: Arc<QueueSignal>,
@@ -121,7 +121,7 @@ struct QueueSignal {
 }
 
 impl QueueSignal {
-#[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))]
+#[cfg_attr(feature="dev", allow(bool_comparison))]
 fn set(&self) {
 if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
 self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
@@ -132,18 +132,23 @@ impl QueueSignal {
 }
 }
 
-#[derive(Default)]
 struct Verification {
-unverified: VecDeque<UnverifiedBlock>,
-verified: VecDeque<PreverifiedBlock>,
-verifying: VecDeque<VerifyingBlock>,
-bad: HashSet<H256>,
+// All locks must be captured in the order declared here.
+unverified: Mutex<VecDeque<UnverifiedBlock>>,
+verified: Mutex<VecDeque<PreverifiedBlock>>,
+verifying: Mutex<VecDeque<VerifyingBlock>>,
+bad: Mutex<HashSet<H256>>,
 }
 
 impl BlockQueue {
 /// Creates a new queue instance.
 pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
-let verification = Arc::new(Mutex::new(Verification::default()));
+let verification = Arc::new(Verification {
+unverified: Mutex::new(VecDeque::new()),
+verified: Mutex::new(VecDeque::new()),
+verifying: Mutex::new(VecDeque::new()),
+bad: Mutex::new(HashSet::new()),
+});
 let more_to_verify = Arc::new(Condvar::new());
 let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
 let deleting = Arc::new(AtomicBool::new(false));
@@ -186,17 +191,17 @@ impl BlockQueue {
 }
 }
 
-fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
+fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
 while !deleting.load(AtomicOrdering::Acquire) {
 {
-let mut lock = verification.lock().unwrap();
+let mut unverified = verification.unverified.lock().unwrap();
 
-if lock.unverified.is_empty() && lock.verifying.is_empty() {
+if unverified.is_empty() && verification.verifying.lock().unwrap().is_empty() {
 empty.notify_all();
 }
 
-while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
-lock = wait.wait(lock).unwrap();
+while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
+unverified = wait.wait(unverified).unwrap();
 }
 
 if deleting.load(AtomicOrdering::Acquire) {
@@ -205,39 +210,42 @@ impl BlockQueue {
 }
 
 let block = {
-let mut v = verification.lock().unwrap();
-if v.unverified.is_empty() {
+let mut unverified = verification.unverified.lock().unwrap();
+if unverified.is_empty() {
 continue;
 }
-let block = v.unverified.pop_front().unwrap();
-v.verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
+let mut verifying = verification.verifying.lock().unwrap();
+let block = unverified.pop_front().unwrap();
+verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
 block
 };
 
 let block_hash = block.header.hash();
 match verify_block_unordered(block.header, block.bytes, engine.deref().deref()) {
 Ok(verified) => {
-let mut v = verification.lock().unwrap();
-for e in &mut v.verifying {
+let mut verifying = verification.verifying.lock().unwrap();
+for e in verifying.iter_mut() {
 if e.hash == block_hash {
 e.block = Some(verified);
 break;
 }
 }
-if !v.verifying.is_empty() && v.verifying.front().unwrap().hash == block_hash {
+if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash {
 // we're next!
-let mut vref = v.deref_mut();
-BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
+let mut verified = verification.verified.lock().unwrap();
+let mut bad = verification.bad.lock().unwrap();
+BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
 ready.set();
 }
 },
 Err(err) => {
-let mut v = verification.lock().unwrap();
+let mut verifying = verification.verifying.lock().unwrap();
+let mut verified = verification.verified.lock().unwrap();
+let mut bad = verification.bad.lock().unwrap();
 warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
-v.bad.insert(block_hash.clone());
-v.verifying.retain(|e| e.hash != block_hash);
-let mut vref = v.deref_mut();
-BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
+bad.insert(block_hash.clone());
+verifying.retain(|e| e.hash != block_hash);
+BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
 ready.set();
 }
 }
@@ -257,19 +265,21 @@ impl BlockQueue {
 }
 
 /// Clear the queue and stop verification activity.
-pub fn clear(&mut self) {
-let mut verification = self.verification.lock().unwrap();
-verification.unverified.clear();
-verification.verifying.clear();
-verification.verified.clear();
+pub fn clear(&self) {
+let mut unverified = self.verification.unverified.lock().unwrap();
+let mut verifying = self.verification.verifying.lock().unwrap();
+let mut verified = self.verification.verified.lock().unwrap();
+unverified.clear();
+verifying.clear();
+verified.clear();
 self.processing.write().unwrap().clear();
 }
 
-/// Wait for queue to be empty
-pub fn flush(&mut self) {
-let mut verification = self.verification.lock().unwrap();
-while !verification.unverified.is_empty() || !verification.verifying.is_empty() {
-verification = self.empty.wait(verification).unwrap();
+/// Wait for unverified queue to be empty
+pub fn flush(&self) {
+let mut unverified = self.verification.unverified.lock().unwrap();
+while !unverified.is_empty() || !self.verification.verifying.lock().unwrap().is_empty() {
+unverified = self.empty.wait(unverified).unwrap();
 }
 }
 
@@ -278,27 +288,28 @@ impl BlockQueue {
 if self.processing.read().unwrap().contains(&hash) {
 return BlockStatus::Queued;
 }
-if self.verification.lock().unwrap().bad.contains(&hash) {
+if self.verification.bad.lock().unwrap().contains(&hash) {
 return BlockStatus::Bad;
 }
 BlockStatus::Unknown
 }
 
 /// Add a block to the queue.
-pub fn import_block(&mut self, bytes: Bytes) -> ImportResult {
+pub fn import_block(&self, bytes: Bytes) -> ImportResult {
 let header = BlockView::new(&bytes).header();
 let h = header.hash();
-if self.processing.read().unwrap().contains(&h) {
-return Err(x!(ImportError::AlreadyQueued));
-}
 {
-let mut verification = self.verification.lock().unwrap();
-if verification.bad.contains(&h) {
+if self.processing.read().unwrap().contains(&h) {
+return Err(x!(ImportError::AlreadyQueued));
+}
+
+let mut bad = self.verification.bad.lock().unwrap();
+if bad.contains(&h) {
 return Err(x!(ImportError::KnownBad));
 }
 
-if verification.bad.contains(&header.parent_hash) {
-verification.bad.insert(h.clone());
+if bad.contains(&header.parent_hash) {
+bad.insert(h.clone());
 return Err(x!(ImportError::KnownBad));
 }
 }
@@ -306,48 +317,47 @@ impl BlockQueue {
 match verify_block_basic(&header, &bytes, self.engine.deref().deref()) {
 Ok(()) => {
 self.processing.write().unwrap().insert(h.clone());
-self.verification.lock().unwrap().unverified.push_back(UnverifiedBlock { header: header, bytes: bytes });
+self.verification.unverified.lock().unwrap().push_back(UnverifiedBlock { header: header, bytes: bytes });
 self.more_to_verify.notify_all();
 Ok(h)
 },
 Err(err) => {
 warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
-self.verification.lock().unwrap().bad.insert(h.clone());
+self.verification.bad.lock().unwrap().insert(h.clone());
 Err(err)
 }
 }
 }
 
 /// Mark given block and all its children as bad. Stops verification.
-pub fn mark_as_bad(&mut self, block_hashes: &[H256]) {
+pub fn mark_as_bad(&self, block_hashes: &[H256]) {
 if block_hashes.is_empty() {
 return;
 }
-let mut verification_lock = self.verification.lock().unwrap();
+let mut verified_lock = self.verification.verified.lock().unwrap();
+let mut verified = verified_lock.deref_mut();
+let mut bad = self.verification.bad.lock().unwrap();
 let mut processing = self.processing.write().unwrap();
-let mut verification = verification_lock.deref_mut();
-verification.bad.reserve(block_hashes.len());
+bad.reserve(block_hashes.len());
 
 for hash in block_hashes {
-verification.bad.insert(hash.clone());
+bad.insert(hash.clone());
 processing.remove(&hash);
 }
 
 let mut new_verified = VecDeque::new();
-for block in verification.verified.drain(..) {
-if verification.bad.contains(&block.header.parent_hash) {
-verification.bad.insert(block.header.hash());
+for block in verified.drain(..) {
+if bad.contains(&block.header.parent_hash) {
+bad.insert(block.header.hash());
 processing.remove(&block.header.hash());
 } else {
 new_verified.push_back(block);
 }
 }
-verification.verified = new_verified;
+*verified = new_verified;
 }
 
 /// Mark given block as processed
-pub fn mark_as_good(&mut self, block_hashes: &[H256]) {
+pub fn mark_as_good(&self, block_hashes: &[H256]) {
 if block_hashes.is_empty() {
 return;
 }
@@ -358,16 +368,16 @@ impl BlockQueue {
 }
 
 /// Removes up to `max` verified blocks from the queue
-pub fn drain(&mut self, max: usize) -> Vec<PreverifiedBlock> {
-let mut verification = self.verification.lock().unwrap();
-let count = min(max, verification.verified.len());
+pub fn drain(&self, max: usize) -> Vec<PreverifiedBlock> {
+let mut verified = self.verification.verified.lock().unwrap();
+let count = min(max, verified.len());
 let mut result = Vec::with_capacity(count);
 for _ in 0..count {
-let block = verification.verified.pop_front().unwrap();
+let block = verified.pop_front().unwrap();
 result.push(block);
 }
 self.ready_signal.reset();
-if !verification.verified.is_empty() {
+if !verified.is_empty() {
 self.ready_signal.set();
 }
 result
@@ -375,17 +385,28 @@ impl BlockQueue {
 
 /// Get queue status.
 pub fn queue_info(&self) -> BlockQueueInfo {
-let verification = self.verification.lock().unwrap();
+let (unverified_len, unverified_bytes) = {
+let v = self.verification.unverified.lock().unwrap();
+(v.len(), v.heap_size_of_children())
+};
+let (verifying_len, verifying_bytes) = {
+let v = self.verification.verifying.lock().unwrap();
+(v.len(), v.heap_size_of_children())
+};
+let (verified_len, verified_bytes) = {
+let v = self.verification.verified.lock().unwrap();
+(v.len(), v.heap_size_of_children())
+};
 BlockQueueInfo {
-verified_queue_size: verification.verified.len(),
-unverified_queue_size: verification.unverified.len(),
-verifying_queue_size: verification.verifying.len(),
+unverified_queue_size: unverified_len,
+verifying_queue_size: verifying_len,
+verified_queue_size: verified_len,
 max_queue_size: self.max_queue_size,
 max_mem_use: self.max_mem_use,
 mem_used:
-verification.unverified.heap_size_of_children()
-+ verification.verifying.heap_size_of_children()
-+ verification.verified.heap_size_of_children(),
+unverified_bytes
++ verifying_bytes
++ verified_bytes
 // TODO: https://github.com/servo/heapsize/pull/50
 //+ self.processing.read().unwrap().heap_size_of_children(),
 }
@@ -393,10 +414,9 @@ impl BlockQueue {
 
 pub fn collect_garbage(&self) {
 {
-let mut verification = self.verification.lock().unwrap();
-verification.unverified.shrink_to_fit();
-verification.verifying.shrink_to_fit();
-verification.verified.shrink_to_fit();
+self.verification.unverified.lock().unwrap().shrink_to_fit();
+self.verification.verifying.lock().unwrap().shrink_to_fit();
+self.verification.verified.lock().unwrap().shrink_to_fit();
 }
 self.processing.write().unwrap().shrink_to_fit();
 }
@@ -444,7 +464,7 @@ mod tests {
 
 #[test]
 fn can_import_blocks() {
-let mut queue = get_test_queue();
+let queue = get_test_queue();
 if let Err(e) = queue.import_block(get_good_dummy_block()) {
 panic!("error importing block that is valid by definition({:?})", e);
 }
@@ -452,7 +472,7 @@ mod tests {
 
 #[test]
 fn returns_error_for_duplicates() {
-let mut queue = get_test_queue();
+let queue = get_test_queue();
 if let Err(e) = queue.import_block(get_good_dummy_block()) {
 panic!("error importing block that is valid by definition({:?})", e);
 }
@@ -471,7 +491,7 @@ mod tests {
 
 #[test]
 fn returns_ok_for_drained_duplicates() {
-let mut queue = get_test_queue();
+let queue = get_test_queue();
 let block = get_good_dummy_block();
 let hash = BlockView::new(&block).header().hash().clone();
 if let Err(e) = queue.import_block(block) {
@@ -488,7 +508,7 @@ mod tests {
 
 #[test]
 fn returns_empty_once_finished() {
-let mut queue = get_test_queue();
+let queue = get_test_queue();
 queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition");
 queue.flush();
 queue.drain(1);
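The block-queue hunks above replace the single Mutex<Verification> with one Mutex per collection, plus a comment fixing the order in which the locks may be taken; that is what lets clear, flush, import_block, mark_as_bad and drain take &self instead of &mut self. A minimal, self-contained sketch of the same pattern, using hypothetical names rather than the Parity types:

    use std::collections::VecDeque;
    use std::sync::{Arc, Mutex};

    struct Queues {
        // Lock order: `pending` must always be taken before `done`.
        pending: Mutex<VecDeque<u64>>,
        done: Mutex<VecDeque<u64>>,
    }

    impl Queues {
        // Shared-reference mutation: only the lock that is actually needed is taken.
        fn import(&self, item: u64) {
            self.pending.lock().unwrap().push_back(item);
        }

        // When both locks are needed, acquire them in the declared order.
        fn promote(&self) {
            let mut pending = self.pending.lock().unwrap();
            let mut done = self.done.lock().unwrap();
            if let Some(item) = pending.pop_front() {
                done.push_back(item);
            }
        }
    }

    fn main() {
        let q = Arc::new(Queues {
            pending: Mutex::new(VecDeque::new()),
            done: Mutex::new(VecDeque::new()),
        });
        q.import(1);
        q.promote();
        assert_eq!(q.done.lock().unwrap().len(), 1);
    }

Taking the locks in one consistent order is what prevents the deadlock that per-field locking would otherwise make possible.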
@@ -16,6 +16,7 @@
 
 //! Blockchain database.
 
+use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder};
 use util::*;
 use header::*;
 use extras::*;
@@ -134,8 +135,9 @@ struct CacheManager {
 ///
 /// **Does not do input data verification.**
 pub struct BlockChain {
-pref_cache_size: usize,
-max_cache_size: usize,
+// All locks must be captured in the order declared here.
+pref_cache_size: AtomicUsize,
+max_cache_size: AtomicUsize,
 
 best_block: RwLock<BestBlock>,
 
@@ -157,6 +159,8 @@ pub struct BlockChain {
 
 // blooms indexing
 bloom_indexer: BloomIndexer,
+
+insert_lock: Mutex<()>
 }
 
 impl FilterDataSource for BlockChain {
@@ -262,8 +266,8 @@ impl BlockChain {
 (0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
 
 let bc = BlockChain {
-pref_cache_size: config.pref_cache_size,
-max_cache_size: config.max_cache_size,
+pref_cache_size: AtomicUsize::new(config.pref_cache_size),
+max_cache_size: AtomicUsize::new(config.max_cache_size),
 best_block: RwLock::new(BestBlock::default()),
 blocks: RwLock::new(HashMap::new()),
 block_details: RwLock::new(HashMap::new()),
@@ -275,7 +279,8 @@ impl BlockChain {
 extras_db: extras_db,
 blocks_db: blocks_db,
 cache_man: RwLock::new(cache_man),
-bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS)
+bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS),
+insert_lock: Mutex::new(()),
 };
 
 // load best block
@@ -318,9 +323,9 @@ impl BlockChain {
 }
 
 /// Set the cache configuration.
-pub fn configure_cache(&mut self, pref_cache_size: usize, max_cache_size: usize) {
-self.pref_cache_size = pref_cache_size;
-self.max_cache_size = max_cache_size;
+pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
+self.pref_cache_size.store(pref_cache_size, AtomicOrder::Relaxed);
+self.max_cache_size.store(max_cache_size, AtomicOrder::Relaxed);
 }
 
 /// Returns a tree route between `from` and `to`, which is a tuple of:
@@ -424,6 +429,7 @@ impl BlockChain {
 return ImportRoute::none();
 }
 
+let _lock = self.insert_lock.lock();
 // store block in db
 self.blocks_db.put(&hash, &bytes).unwrap();
 
@@ -446,48 +452,58 @@ impl BlockChain {
 let batch = DBTransaction::new();
 batch.put(b"best", &update.info.hash).unwrap();
 
-// update best block
-let mut best_block = self.best_block.write().unwrap();
-match update.info.location {
-BlockLocation::Branch => (),
-_ => {
-*best_block = BestBlock {
-hash: update.info.hash,
-number: update.info.number,
-total_difficulty: update.info.total_difficulty
-};
+{
+let mut write_details = self.block_details.write().unwrap();
+for (hash, details) in update.block_details.into_iter() {
+batch.put_extras(&hash, &details);
+self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash.clone()));
+write_details.insert(hash, details);
 }
 }
 
-let mut write_hashes = self.block_hashes.write().unwrap();
-for (number, hash) in &update.block_hashes {
-batch.put_extras(number, hash);
-write_hashes.remove(number);
+{
+let mut write_receipts = self.block_receipts.write().unwrap();
+for (hash, receipt) in &update.block_receipts {
+batch.put_extras(hash, receipt);
+write_receipts.remove(hash);
+}
 }
 
-let mut write_details = self.block_details.write().unwrap();
-for (hash, details) in update.block_details.into_iter() {
-batch.put_extras(&hash, &details);
-write_details.insert(hash.clone(), details);
-self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash));
+{
+let mut write_blocks_blooms = self.blocks_blooms.write().unwrap();
+for (bloom_hash, blocks_bloom) in &update.blocks_blooms {
+batch.put_extras(bloom_hash, blocks_bloom);
+write_blocks_blooms.remove(bloom_hash);
+}
 }
 
-let mut write_receipts = self.block_receipts.write().unwrap();
-for (hash, receipt) in &update.block_receipts {
-batch.put_extras(hash, receipt);
-write_receipts.remove(hash);
-}
+// These cached values must be updated last and togeterh
+{
+let mut best_block = self.best_block.write().unwrap();
+let mut write_hashes = self.block_hashes.write().unwrap();
+let mut write_txs = self.transaction_addresses.write().unwrap();
 
-let mut write_txs = self.transaction_addresses.write().unwrap();
-for (hash, tx_address) in &update.transactions_addresses {
-batch.put_extras(hash, tx_address);
-write_txs.remove(hash);
-}
+// update best block
+match update.info.location {
+BlockLocation::Branch => (),
+_ => {
+*best_block = BestBlock {
+hash: update.info.hash,
+number: update.info.number,
+total_difficulty: update.info.total_difficulty
+};
+}
+}
 
-let mut write_blocks_blooms = self.blocks_blooms.write().unwrap();
-for (bloom_hash, blocks_bloom) in &update.blocks_blooms {
-batch.put_extras(bloom_hash, blocks_bloom);
-write_blocks_blooms.remove(bloom_hash);
+for (number, hash) in &update.block_hashes {
+batch.put_extras(number, hash);
+write_hashes.remove(number);
+}
+
+for (hash, tx_address) in &update.transactions_addresses {
+batch.put_extras(hash, tx_address);
+write_txs.remove(hash);
+}
 }
 
 // update extras database
@@ -781,11 +797,10 @@ impl BlockChain {
 
 /// Ticks our cache system and throws out any old data.
 pub fn collect_garbage(&self) {
-if self.cache_size().total() < self.pref_cache_size { return; }
+if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) { return; }
 
 for _ in 0..COLLECTION_QUEUE_SIZE {
 {
-let mut cache_man = self.cache_man.write().unwrap();
 let mut blocks = self.blocks.write().unwrap();
 let mut block_details = self.block_details.write().unwrap();
 let mut block_hashes = self.block_hashes.write().unwrap();
@@ -793,6 +808,7 @@ impl BlockChain {
 let mut block_logs = self.block_logs.write().unwrap();
 let mut blocks_blooms = self.blocks_blooms.write().unwrap();
 let mut block_receipts = self.block_receipts.write().unwrap();
+let mut cache_man = self.cache_man.write().unwrap();
 
 for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
 cache_man.in_use.remove(&id);
@@ -819,7 +835,7 @@ impl BlockChain {
 blocks_blooms.shrink_to_fit();
 block_receipts.shrink_to_fit();
 }
-if self.cache_size().total() < self.max_cache_size { break; }
+if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; }
 }
 
 // TODO: m_lastCollection = chrono::system_clock::now();
@@ -891,7 +907,7 @@ mod tests {
 }
 
 #[test]
-#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 fn test_find_uncles() {
 let mut canon_chain = ChainGenerator::default();
 let mut finalizer = BlockFinalizer::default();
@@ -929,7 +945,7 @@ mod tests {
 }
 
 #[test]
-#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 fn test_small_fork() {
 let mut canon_chain = ChainGenerator::default();
 let mut finalizer = BlockFinalizer::default();
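The blockchain hunks apply the same idea to plain counters: pref_cache_size and max_cache_size become AtomicUsize so configure_cache and collect_garbage work through &self, while the new insert_lock serialises insert_block. A small sketch of the atomic-field half, again with hypothetical names rather than the Parity types:

    use std::sync::atomic::{AtomicUsize, Ordering};

    struct CacheLimits {
        pref_cache_size: AtomicUsize,
        max_cache_size: AtomicUsize,
    }

    impl CacheLimits {
        // Reconfiguration through a shared reference: no &mut self and no lock.
        fn configure(&self, pref: usize, max: usize) {
            self.pref_cache_size.store(pref, Ordering::Relaxed);
            self.max_cache_size.store(max, Ordering::Relaxed);
        }

        fn should_collect(&self, used: usize) -> bool {
            used >= self.pref_cache_size.load(Ordering::Relaxed)
        }

        fn hard_limit_hit(&self, used: usize) -> bool {
            used >= self.max_cache_size.load(Ordering::Relaxed)
        }
    }

    fn main() {
        let limits = CacheLimits {
            pref_cache_size: AtomicUsize::new(16_384),
            max_cache_size: AtomicUsize::new(262_144),
        };
        limits.configure(32_768, 524_288);
        assert!(limits.should_collect(40_000));
        assert!(!limits.hard_limit_hit(40_000));
    }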
@@ -23,9 +23,9 @@ mod bloom_indexer;
 mod cache;
 mod tree_route;
 mod update;
+mod import_route;
 #[cfg(test)]
 mod generator;
-mod import_route;
 
 pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig};
 pub use self::cache::CacheSize;
@@ -19,7 +19,6 @@
 use std::marker::PhantomData;
 use util::*;
 use util::panics::*;
-use blockchain::{BlockChain, BlockProvider};
 use views::BlockView;
 use error::*;
 use header::{BlockNumber};
@@ -27,7 +26,6 @@ use state::State;
 use spec::Spec;
 use engine::Engine;
 use views::HeaderView;
-use block_queue::BlockQueue;
 use service::{NetSyncMessage, SyncMessage};
 use env_info::LastHashes;
 use verification::*;
@@ -36,33 +34,10 @@ use transaction::{LocalizedTransaction, SignedTransaction};
 use extras::TransactionAddress;
 use filter::Filter;
 use log_entry::LocalizedLogEntry;
-use util::keys::store::SecretStore;
-pub use block_queue::{BlockQueueConfig, BlockQueueInfo};
-pub use blockchain::{TreeRoute, BlockChainConfig, CacheSize as BlockChainCacheSize};
-
-/// Uniquely identifies block.
-#[derive(Debug, PartialEq, Clone)]
-pub enum BlockId {
-/// Block's sha3.
-/// Querying by hash is always faster.
-Hash(H256),
-/// Block number within canon blockchain.
-Number(BlockNumber),
-/// Earliest block (genesis).
-Earliest,
-/// Latest mined block.
-Latest
-}
-
-/// Uniquely identifies transaction.
-#[derive(Debug, PartialEq, Clone)]
-pub enum TransactionId {
-/// Transaction's sha3.
-Hash(H256),
-/// Block id and transaction index within this block.
-/// Querying by block position is always faster.
-Location(BlockId, usize)
-}
+use block_queue::{BlockQueue, BlockQueueInfo};
+use blockchain::{BlockChain, BlockProvider, TreeRoute};
+use client::{BlockId, TransactionId, ClientConfig, BlockChainClient};
+pub use blockchain::CacheSize as BlockChainCacheSize;
 
 /// General block status
 #[derive(Debug, Eq, PartialEq)]
@@ -77,30 +52,6 @@ pub enum BlockStatus {
 Unknown,
 }
 
-/// Client configuration. Includes configs for all sub-systems.
-#[derive(Debug)]
-pub struct ClientConfig {
-/// Block queue configuration.
-pub queue: BlockQueueConfig,
-/// Blockchain configuration.
-pub blockchain: BlockChainConfig,
-/// Prefer journal rather than archive.
-pub prefer_journal: bool,
-/// The name of the client instance.
-pub name: String,
-}
-
-impl Default for ClientConfig {
-fn default() -> ClientConfig {
-ClientConfig {
-queue: Default::default(),
-blockchain: Default::default(),
-prefer_journal: false,
-name: Default::default(),
-}
-}
-}
-
 /// Information about the blockchain gathered together.
 #[derive(Debug)]
 pub struct BlockChainInfo {
@@ -122,79 +73,8 @@ impl fmt::Display for BlockChainInfo {
 }
 }
 
-/// Blockchain database client. Owns and manages a blockchain and a block queue.
-pub trait BlockChainClient : Sync + Send {
-/// Get raw block header data by block id.
-fn block_header(&self, id: BlockId) -> Option<Bytes>;
-
-/// Get raw block body data by block id.
-/// Block body is an RLP list of two items: uncles and transactions.
-fn block_body(&self, id: BlockId) -> Option<Bytes>;
-
-/// Get raw block data by block header hash.
-fn block(&self, id: BlockId) -> Option<Bytes>;
-
-/// Get block status by block header hash.
-fn block_status(&self, id: BlockId) -> BlockStatus;
-
-/// Get block total difficulty.
-fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;
-
-/// Get address nonce.
-fn nonce(&self, address: &Address) -> U256;
-
-/// Get block hash.
-fn block_hash(&self, id: BlockId) -> Option<H256>;
-
-/// Get address code.
-fn code(&self, address: &Address) -> Option<Bytes>;
-
-/// Get transaction with given hash.
-fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction>;
-
-/// Get a tree route between `from` and `to`.
-/// See `BlockChain::tree_route`.
-fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
-
-/// Get latest state node
-fn state_data(&self, hash: &H256) -> Option<Bytes>;
-
-/// Get raw block receipts data by block header hash.
-fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
-
-/// Import a block into the blockchain.
-fn import_block(&self, bytes: Bytes) -> ImportResult;
-
-/// Get block queue information.
-fn queue_info(&self) -> BlockQueueInfo;
-
-/// Clear block queue and abort all import activity.
-fn clear_queue(&self);
-
-/// Get blockchain information.
-fn chain_info(&self) -> BlockChainInfo;
-
-/// Get the best block header.
-fn best_block_header(&self) -> Bytes {
-self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap()
-}
-
-/// Returns numbers of blocks containing given bloom.
-fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;
-
-/// Returns logs matching given filter.
-fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
-
-/// Returns ClosedBlock prepared for sealing.
-fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec<SignedTransaction>) -> Option<ClosedBlock>;
-
-/// Attempts to seal given block. Returns `SealedBlock` on success and the same block in case of error.
-fn try_seal(&self, block: ClosedBlock, seal: Vec<Bytes>) -> Result<SealedBlock, ClosedBlock>;
-}
-
-#[derive(Default, Clone, Debug, Eq, PartialEq)]
 /// Report on the status of a client.
+#[derive(Default, Clone, Debug, Eq, PartialEq)]
 pub struct ClientReport {
 /// How many blocks have been imported so far.
 pub blocks_imported: usize,
@@ -218,15 +98,14 @@ impl ClientReport {
 /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
 /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
 pub struct Client<V = CanonVerifier> where V: Verifier {
-chain: Arc<RwLock<BlockChain>>,
+chain: Arc<BlockChain>,
 engine: Arc<Box<Engine>>,
 state_db: Mutex<JournalDB>,
-block_queue: RwLock<BlockQueue>,
+block_queue: BlockQueue,
 report: RwLock<ClientReport>,
 import_lock: Mutex<()>,
 panic_handler: Arc<PanicHandler>,
 verifier: PhantomData<V>,
-secret_store: Arc<RwLock<SecretStore>>,
 }
 
 const HISTORY: u64 = 1000;
@@ -248,7 +127,7 @@ impl<V> Client<V> where V: Verifier {
 dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, if config.prefer_journal { "pruned" } else { "archive" }));
 let path = dir.as_path();
 let gb = spec.genesis_block();
-let chain = Arc::new(RwLock::new(BlockChain::new(config.blockchain, &gb, path)));
+let chain = Arc::new(BlockChain::new(config.blockchain, &gb, path));
 let mut state_path = path.to_path_buf();
 state_path.push("state");
 
@@ -262,34 +141,29 @@ impl<V> Client<V> where V: Verifier {
 let panic_handler = PanicHandler::new_in_arc();
 panic_handler.forward_from(&block_queue);
 
-let secret_store = Arc::new(RwLock::new(SecretStore::new()));
-secret_store.write().unwrap().try_import_existing();
-
 Ok(Arc::new(Client {
 chain: chain,
 engine: engine,
 state_db: Mutex::new(state_db),
-block_queue: RwLock::new(block_queue),
+block_queue: block_queue,
 report: RwLock::new(Default::default()),
 import_lock: Mutex::new(()),
 panic_handler: panic_handler,
 verifier: PhantomData,
-secret_store: secret_store,
 }))
 }
 
 /// Flush the block import queue.
 pub fn flush_queue(&self) {
-self.block_queue.write().unwrap().flush();
+self.block_queue.flush();
 }
 
 fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
 let mut last_hashes = LastHashes::new();
 last_hashes.resize(256, H256::new());
 last_hashes[0] = parent_hash;
-let chain = self.chain.read().unwrap();
 for i in 0..255 {
-match chain.block_details(&last_hashes[i]) {
+match self.chain.block_details(&last_hashes[i]) {
 Some(details) => {
 last_hashes[i + 1] = details.parent.clone();
 },
@@ -299,31 +173,26 @@ impl<V> Client<V> where V: Verifier {
 last_hashes
 }
 
-/// Secret store (key manager)
-pub fn secret_store(&self) -> &Arc<RwLock<SecretStore>> {
-&self.secret_store
-}
-
 fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<ClosedBlock, ()> {
 let engine = self.engine.deref().deref();
 let header = &block.header;
 
 // Check the block isn't so old we won't be able to enact it.
-let best_block_number = self.chain.read().unwrap().best_block_number();
+let best_block_number = self.chain.best_block_number();
 if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY {
 warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
 return Err(());
 }
 
 // Verify Block Family
-let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.read().unwrap().deref());
+let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.deref());
 if let Err(e) = verify_family_result {
 warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
 return Err(());
 };
 
 // Check if Parent is in chain
-let chain_has_parent = self.chain.read().unwrap().block_header(&header.parent_hash);
+let chain_has_parent = self.chain.block_header(&header.parent_hash);
 if let None = chain_has_parent {
 warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
 return Err(());
@@ -358,7 +227,7 @@ impl<V> Client<V> where V: Verifier {
 let mut bad_blocks = HashSet::new();
 
 let _import_lock = self.import_lock.lock();
-let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import);
+let blocks = self.block_queue.drain(max_blocks_to_import);
 
 let original_best = self.chain_info().best_block_hash;
 
@@ -379,8 +248,7 @@ impl<V> Client<V> where V: Verifier {
 // Are we committing an era?
 let ancient = if header.number() >= HISTORY {
 let n = header.number() - HISTORY;
-let chain = self.chain.read().unwrap();
-Some((n, chain.block_hash(n).unwrap()))
+Some((n, self.chain.block_hash(n).unwrap()))
 } else {
 None
 };
@@ -394,8 +262,7 @@ impl<V> Client<V> where V: Verifier {
 
 // And update the chain after commit to prevent race conditions
 // (when something is in chain but you are not able to fetch details)
-self.chain.write().unwrap()
-.insert_block(&block.bytes, receipts);
+self.chain.insert_block(&block.bytes, receipts);
 
 self.report.write().unwrap().accrue_block(&block);
 trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
@@ -405,18 +272,16 @@ impl<V> Client<V> where V: Verifier {
 let bad_blocks = bad_blocks.into_iter().collect::<Vec<H256>>();
 
 {
-let mut block_queue = self.block_queue.write().unwrap();
 if !bad_blocks.is_empty() {
-block_queue.mark_as_bad(&bad_blocks);
+self.block_queue.mark_as_bad(&bad_blocks);
 }
 if !good_blocks.is_empty() {
-block_queue.mark_as_good(&good_blocks);
+self.block_queue.mark_as_good(&good_blocks);
 }
 }
 
 {
-let block_queue = self.block_queue.read().unwrap();
-if !good_blocks.is_empty() && block_queue.queue_info().is_empty() {
+if !good_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
 io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
 good: good_blocks,
 bad: bad_blocks,
@@ -442,7 +307,7 @@ impl<V> Client<V> where V: Verifier {
 
 /// Get info on the cache.
 pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
-self.chain.read().unwrap().cache_size()
+self.chain.cache_size()
 }
 
 /// Get the report.
@@ -454,13 +319,13 @@ impl<V> Client<V> where V: Verifier {
 
 /// Tick the client.
 pub fn tick(&self) {
-self.chain.read().unwrap().collect_garbage();
-self.block_queue.read().unwrap().collect_garbage();
+self.chain.collect_garbage();
+self.block_queue.collect_garbage();
 }
 
 /// Set up the cache behaviour.
 pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
-self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size);
+self.chain.configure_cache(pref_cache_size, max_cache_size);
 }
 
 fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
@@ -475,9 +340,9 @@ impl<V> Client<V> where V: Verifier {
 fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
 match id {
 BlockId::Number(number) => Some(number),
-BlockId::Hash(ref hash) => self.chain.read().unwrap().block_number(hash),
+BlockId::Hash(ref hash) => self.chain.block_number(hash),
 BlockId::Earliest => Some(0),
-BlockId::Latest => Some(self.chain.read().unwrap().best_block_number())
+BlockId::Latest => Some(self.chain.best_block_number())
 }
 }
 }
@@ -494,19 +359,19 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 
 fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec<SignedTransaction>) -> Option<ClosedBlock> {
 let engine = self.engine.deref().deref();
-let h = self.chain.read().unwrap().best_block_hash();
+let h = self.chain.best_block_hash();
 
 let mut b = OpenBlock::new(
 engine,
 self.state_db.lock().unwrap().clone(),
-match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => { return None; } },
+match self.chain.block_header(&h) { Some(ref x) => x, None => {return None} },
 self.build_last_hashes(h.clone()),
 author,
 extra_data,
 );
 
 // Add uncles
-self.chain.read().unwrap()
+self.chain
 .find_uncle_headers(&h, engine.maximum_uncle_age())
 .unwrap()
 .into_iter()
@@ -535,14 +400,12 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 }
 
 fn block_header(&self, id: BlockId) -> Option<Bytes> {
-let chain = self.chain.read().unwrap();
-Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
+Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
 }
 
 fn block_body(&self, id: BlockId) -> Option<Bytes> {
-let chain = self.chain.read().unwrap();
-Self::block_hash(&chain, id).and_then(|hash| {
-chain.block(&hash).map(|bytes| {
+Self::block_hash(&self.chain, id).and_then(|hash| {
+self.chain.block(&hash).map(|bytes| {
 let rlp = Rlp::new(&bytes);
 let mut body = RlpStream::new_list(2);
 body.append_raw(rlp.at(1).as_raw(), 1);
@@ -553,24 +416,21 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 }
 
 fn block(&self, id: BlockId) -> Option<Bytes> {
-let chain = self.chain.read().unwrap();
-Self::block_hash(&chain, id).and_then(|hash| {
-chain.block(&hash)
+Self::block_hash(&self.chain, id).and_then(|hash| {
+self.chain.block(&hash)
 })
 }
 
 fn block_status(&self, id: BlockId) -> BlockStatus {
-let chain = self.chain.read().unwrap();
-match Self::block_hash(&chain, id) {
-Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
-Some(hash) => self.block_queue.read().unwrap().block_status(&hash),
+match Self::block_hash(&self.chain, id) {
+Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain,
+Some(hash) => self.block_queue.block_status(&hash),
 None => BlockStatus::Unknown
 }
 }
 
 fn block_total_difficulty(&self, id: BlockId) -> Option<U256> {
-let chain = self.chain.read().unwrap();
+Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty)
|
||||||
Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn nonce(&self, address: &Address) -> U256 {
|
fn nonce(&self, address: &Address) -> U256 {
|
||||||
@ -578,8 +438,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn block_hash(&self, id: BlockId) -> Option<H256> {
|
fn block_hash(&self, id: BlockId) -> Option<H256> {
|
||||||
let chain = self.chain.read().unwrap();
|
Self::block_hash(&self.chain, id)
|
||||||
Self::block_hash(&chain, id)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn code(&self, address: &Address) -> Option<Bytes> {
|
fn code(&self, address: &Address) -> Option<Bytes> {
|
||||||
@ -587,20 +446,18 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
|
fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
|
||||||
let chain = self.chain.read().unwrap();
|
|
||||||
match id {
|
match id {
|
||||||
TransactionId::Hash(ref hash) => chain.transaction_address(hash),
|
TransactionId::Hash(ref hash) => self.chain.transaction_address(hash),
|
||||||
TransactionId::Location(id, index) => Self::block_hash(&chain, id).map(|hash| TransactionAddress {
|
TransactionId::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress {
|
||||||
block_hash: hash,
|
block_hash: hash,
|
||||||
index: index
|
index: index
|
||||||
})
|
})
|
||||||
}.and_then(|address| chain.transaction(&address))
|
}.and_then(|address| self.chain.transaction(&address))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
|
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
|
||||||
let chain = self.chain.read().unwrap();
|
match self.chain.is_known(from) && self.chain.is_known(to) {
|
||||||
match chain.is_known(from) && chain.is_known(to) {
|
true => Some(self.chain.tree_route(from.clone(), to.clone())),
|
||||||
true => Some(chain.tree_route(from.clone(), to.clone())),
|
|
||||||
false => None
|
false => None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
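Note: the `transaction` lookup a few hunks above resolves a `TransactionId` to a `TransactionAddress` (block hash plus index) before fetching the body. A compilable sketch of that two-step resolution; the types here are simplified stand-ins, not the ethcore ones:

```rust
enum TransactionId { Hash(String), Location(u64, usize) } // by tx hash, or (block number, index)

struct TransactionAddress { block_hash: String, index: usize }

// Stand-in for the chain lookups: the real client asks the BlockChain for the
// address of a hashed transaction, or maps the block id to a hash first.
fn resolve(id: TransactionId) -> Option<TransactionAddress> {
	match id {
		TransactionId::Hash(h) => Some(TransactionAddress { block_hash: format!("block-of-{}", h), index: 0 }),
		TransactionId::Location(number, index) => Some(TransactionAddress { block_hash: format!("block-{}", number), index }),
	}
}

fn main() {
	let addr = resolve(TransactionId::Location(5, 2)).unwrap();
	println!("{} #{}", addr.block_hash, addr.index);
}
```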
@ -616,43 +473,44 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
|
|||||||
fn import_block(&self, bytes: Bytes) -> ImportResult {
|
fn import_block(&self, bytes: Bytes) -> ImportResult {
|
||||||
{
|
{
|
||||||
let header = BlockView::new(&bytes).header_view();
|
let header = BlockView::new(&bytes).header_view();
|
||||||
if self.chain.read().unwrap().is_known(&header.sha3()) {
|
if self.chain.is_known(&header.sha3()) {
|
||||||
return Err(x!(ImportError::AlreadyInChain));
|
return Err(x!(ImportError::AlreadyInChain));
|
||||||
}
|
}
|
||||||
if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
|
if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
|
||||||
return Err(x!(BlockError::UnknownParent(header.parent_hash())));
|
return Err(x!(BlockError::UnknownParent(header.parent_hash())));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
self.block_queue.write().unwrap().import_block(bytes)
|
self.block_queue.import_block(bytes)
|
||||||
}
|
}
|
||||||
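Note: `import_block` above keeps the same gatekeeping order on both sides of the diff: reject blocks already in the chain, reject blocks with an unknown parent, and only then hand the raw bytes to the queue. A condensed sketch of that flow with hypothetical helper closures in place of the real chain queries:

```rust
// Order of checks mirrored from the hunk above; all types simplified.
#[derive(PartialEq)]
enum Status { InChain, Queued, Unknown }

#[derive(Debug, PartialEq)]
enum ImportError { AlreadyInChain, UnknownParent }

fn import_block(known: impl Fn(&str) -> bool,
                parent_status: impl Fn(&str) -> Status,
                hash: &str, parent: &str) -> Result<(), ImportError> {
	if known(hash) {
		return Err(ImportError::AlreadyInChain); // duplicate: bail out early
	}
	if parent_status(parent) == Status::Unknown {
		return Err(ImportError::UnknownParent);  // parent not seen: cannot queue
	}
	Ok(()) // otherwise the raw bytes would go to block_queue.import_block(bytes)
}

fn main() {
	let r = import_block(|_| false, |_| Status::Queued, "h1", "h0");
	assert_eq!(r, Ok(()));
}
```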
|
|
||||||
fn queue_info(&self) -> BlockQueueInfo {
|
fn queue_info(&self) -> BlockQueueInfo {
|
||||||
self.block_queue.read().unwrap().queue_info()
|
self.block_queue.queue_info()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn clear_queue(&self) {
|
fn clear_queue(&self) {
|
||||||
self.block_queue.write().unwrap().clear();
|
self.block_queue.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn chain_info(&self) -> BlockChainInfo {
|
fn chain_info(&self) -> BlockChainInfo {
|
||||||
let chain = self.chain.read().unwrap();
|
|
||||||
BlockChainInfo {
|
BlockChainInfo {
|
||||||
total_difficulty: chain.best_block_total_difficulty(),
|
total_difficulty: self.chain.best_block_total_difficulty(),
|
||||||
pending_total_difficulty: chain.best_block_total_difficulty(),
|
pending_total_difficulty: self.chain.best_block_total_difficulty(),
|
||||||
genesis_hash: chain.genesis_hash(),
|
genesis_hash: self.chain.genesis_hash(),
|
||||||
best_block_hash: chain.best_block_hash(),
|
best_block_hash: self.chain.best_block_hash(),
|
||||||
best_block_number: From::from(chain.best_block_number())
|
best_block_number: From::from(self.chain.best_block_number())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>> {
|
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>> {
|
||||||
match (self.block_number(from_block), self.block_number(to_block)) {
|
match (self.block_number(from_block), self.block_number(to_block)) {
|
||||||
(Some(from), Some(to)) => Some(self.chain.read().unwrap().blocks_with_bloom(bloom, from, to)),
|
(Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)),
|
||||||
_ => None
|
_ => None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
|
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
|
||||||
|
// TODO: lock blockchain only once
|
||||||
|
|
||||||
let mut blocks = filter.bloom_possibilities().iter()
|
let mut blocks = filter.bloom_possibilities().iter()
|
||||||
.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
|
.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
|
||||||
.flat_map(|m| m)
|
.flat_map(|m| m)
|
||||||
@ -664,9 +522,9 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
|
|||||||
blocks.sort();
|
blocks.sort();
|
||||||
|
|
||||||
blocks.into_iter()
|
blocks.into_iter()
|
||||||
.filter_map(|number| self.chain.read().unwrap().block_hash(number).map(|hash| (number, hash)))
|
.filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
|
||||||
.filter_map(|(number, hash)| self.chain.read().unwrap().block_receipts(&hash).map(|r| (number, hash, r.receipts)))
|
.filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
|
||||||
.filter_map(|(number, hash, receipts)| self.chain.read().unwrap().block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
|
.filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
|
||||||
.flat_map(|(number, hash, receipts, hashes)| {
|
.flat_map(|(number, hash, receipts, hashes)| {
|
||||||
let mut log_index = 0;
|
let mut log_index = 0;
|
||||||
receipts.into_iter()
|
receipts.into_iter()
|
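Note: `logs()` above is one iterator pipeline: bloom possibilities → matching block numbers → (number, hash) → receipts → localized log entries. A toy version with the same shape (the data and lookups are fake; the real code queries the blockchain at each stage):

```rust
// Toy pipeline with the same shape as logs() above.
fn main() {
	let blooms = vec![1u64, 2, 3];
	let blocks_with_bloom = |b: &u64| -> Option<Vec<u64>> {
		if *b % 2 == 1 { Some(vec![*b * 10]) } else { None } // pretend bloom lookup
	};

	let mut blocks: Vec<u64> = blooms.iter()
		.filter_map(|bloom| blocks_with_bloom(bloom))
		.flat_map(|m| m)
		.collect();
	blocks.sort();

	let logs: Vec<String> = blocks.into_iter()
		.filter_map(|number| Some((number, format!("hash-{}", number))))   // number -> hash
		.filter_map(|(number, hash)| Some((number, hash, vec!["log"])))    // hash -> receipts
		.flat_map(|(number, hash, receipts)| {
			receipts.into_iter().map(move |r| format!("{}@{} {}", hash, number, r))
		})
		.collect();

	assert_eq!(logs.len(), 2); // blooms 1 and 3 matched
}
```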
@@ -14,12 +14,18 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-extern crate rustc_version;
-
-use rustc_version::{version_meta, Channel};
-
-fn main() {
-	if let Channel::Nightly = version_meta().channel {
-		println!("cargo:rustc-cfg=nightly");
-	}
+pub use block_queue::BlockQueueConfig;
+pub use blockchain::BlockChainConfig;
+
+/// Client configuration. Includes configs for all sub-systems.
+#[derive(Debug, Default)]
+pub struct ClientConfig {
+	/// Block queue configuration.
+	pub queue: BlockQueueConfig,
+	/// Blockchain configuration.
+	pub blockchain: BlockChainConfig,
+	/// Prefer journal rather than archive.
+	pub prefer_journal: bool,
+	/// The name of the client instance.
+	pub name: String,
 }
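Note: the nightly-detecting build script goes away and a plain `ClientConfig` struct takes its place. A minimal sketch of filling one in, relying on the `#[derive(Default)]` the hunk declares; the sub-config types are stubbed out here so the example stands alone:

```rust
// Sketch only: the shape of the ClientConfig introduced above, with the
// sub-config types replaced by empty stand-ins so this compiles on its own.
#[derive(Debug, Default)]
struct BlockQueueConfig;   // stand-in for block_queue::BlockQueueConfig
#[derive(Debug, Default)]
struct BlockChainConfig;   // stand-in for blockchain::BlockChainConfig

#[derive(Debug, Default)]
struct ClientConfig {
	queue: BlockQueueConfig,
	blockchain: BlockChainConfig,
	prefer_journal: bool,
	name: String,
}

fn main() {
	let config = ClientConfig {
		prefer_journal: true,            // opt into the journal DB
		name: "my-node".into(),          // client instance name
		..Default::default()             // queue / blockchain keep their defaults
	};
	println!("{:?}", config);
}
```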
44 ethcore/src/client/ids.rs Normal file
@@ -0,0 +1,44 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Unique identifiers.
+
+use util::hash::H256;
+use header::BlockNumber;
+
+/// Uniquely identifies block.
+#[derive(Debug, PartialEq, Clone)]
+pub enum BlockId {
+	/// Block's sha3.
+	/// Querying by hash is always faster.
+	Hash(H256),
+	/// Block number within canon blockchain.
+	Number(BlockNumber),
+	/// Earliest block (genesis).
+	Earliest,
+	/// Latest mined block.
+	Latest
+}
+
+/// Uniquely identifies transaction.
+#[derive(Debug, PartialEq, Clone)]
+pub enum TransactionId {
+	/// Transaction's sha3.
+	Hash(H256),
+	/// Block id and transaction index within this block.
+	/// Querying by block position is always faster.
+	Location(BlockId, usize)
+}
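Note: a short usage sketch of the two identifier enums added above. `H256` and `BlockNumber` are stubbed with simple aliases so the example compiles on its own; in the real crate they come from `util::hash` and `header`:

```rust
// Sketch: how callers pick an identifier without knowing any hashes.
type H256 = [u8; 32];
type BlockNumber = u64;

#[derive(Debug, PartialEq, Clone)]
enum BlockId { Hash(H256), Number(BlockNumber), Earliest, Latest }

#[derive(Debug, PartialEq, Clone)]
enum TransactionId { Hash(H256), Location(BlockId, usize) }

fn main() {
	// "third transaction of the latest block":
	let id = TransactionId::Location(BlockId::Latest, 2);
	match id {
		TransactionId::Hash(_) => println!("lookup by transaction hash"),
		TransactionId::Location(block, index) => println!("lookup tx #{} in {:?}", index, block),
	}
}
```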
112 ethcore/src/client/mod.rs Normal file
@ -0,0 +1,112 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//! Blockchain database client.
|
||||||
|
|
||||||
|
mod client;
|
||||||
|
mod config;
|
||||||
|
mod ids;
|
||||||
|
mod test_client;
|
||||||
|
|
||||||
|
pub use self::client::*;
|
||||||
|
pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig};
|
||||||
|
pub use self::ids::{BlockId, TransactionId};
|
||||||
|
pub use self::test_client::{TestBlockChainClient, EachBlockWith};
|
||||||
|
|
||||||
|
use util::bytes::Bytes;
|
||||||
|
use util::hash::{Address, H256, H2048};
|
||||||
|
use util::numbers::U256;
|
||||||
|
use blockchain::TreeRoute;
|
||||||
|
use block_queue::BlockQueueInfo;
|
||||||
|
use block::{ClosedBlock, SealedBlock};
|
||||||
|
use header::BlockNumber;
|
||||||
|
use transaction::{LocalizedTransaction, SignedTransaction};
|
||||||
|
use log_entry::LocalizedLogEntry;
|
||||||
|
use filter::Filter;
|
||||||
|
use error::{ImportResult};
|
||||||
|
|
||||||
|
/// Blockchain database client. Owns and manages a blockchain and a block queue.
|
||||||
|
pub trait BlockChainClient : Sync + Send {
|
||||||
|
/// Get raw block header data by block id.
|
||||||
|
fn block_header(&self, id: BlockId) -> Option<Bytes>;
|
||||||
|
|
||||||
|
/// Get raw block body data by block id.
|
||||||
|
/// Block body is an RLP list of two items: uncles and transactions.
|
||||||
|
fn block_body(&self, id: BlockId) -> Option<Bytes>;
|
||||||
|
|
||||||
|
/// Get raw block data by block header hash.
|
||||||
|
fn block(&self, id: BlockId) -> Option<Bytes>;
|
||||||
|
|
||||||
|
/// Get block status by block header hash.
|
||||||
|
fn block_status(&self, id: BlockId) -> BlockStatus;
|
||||||
|
|
||||||
|
/// Get block total difficulty.
|
||||||
|
fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;
|
||||||
|
|
||||||
|
/// Get address nonce.
|
||||||
|
fn nonce(&self, address: &Address) -> U256;
|
||||||
|
|
||||||
|
/// Get block hash.
|
||||||
|
fn block_hash(&self, id: BlockId) -> Option<H256>;
|
||||||
|
|
||||||
|
/// Get address code.
|
||||||
|
fn code(&self, address: &Address) -> Option<Bytes>;
|
||||||
|
|
||||||
|
/// Get transaction with given hash.
|
||||||
|
fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction>;
|
||||||
|
|
||||||
|
/// Get a tree route between `from` and `to`.
|
||||||
|
/// See `BlockChain::tree_route`.
|
||||||
|
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
|
||||||
|
|
||||||
|
/// Get latest state node
|
||||||
|
fn state_data(&self, hash: &H256) -> Option<Bytes>;
|
||||||
|
|
||||||
|
/// Get raw block receipts data by block header hash.
|
||||||
|
fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
|
||||||
|
|
||||||
|
/// Import a block into the blockchain.
|
||||||
|
fn import_block(&self, bytes: Bytes) -> ImportResult;
|
||||||
|
|
||||||
|
/// Get block queue information.
|
||||||
|
fn queue_info(&self) -> BlockQueueInfo;
|
||||||
|
|
||||||
|
/// Clear block queue and abort all import activity.
|
||||||
|
fn clear_queue(&self);
|
||||||
|
|
||||||
|
/// Get blockchain information.
|
||||||
|
fn chain_info(&self) -> BlockChainInfo;
|
||||||
|
|
||||||
|
/// Get the best block header.
|
||||||
|
fn best_block_header(&self) -> Bytes {
|
||||||
|
// TODO: lock blockchain only once
|
||||||
|
self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns numbers of blocks containing given bloom.
|
||||||
|
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;
|
||||||
|
|
||||||
|
/// Returns logs matching given filter.
|
||||||
|
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
|
||||||
|
|
||||||
|
/// Returns ClosedBlock prepared for sealing.
|
||||||
|
fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec<SignedTransaction>) -> Option<ClosedBlock>;
|
||||||
|
|
||||||
|
/// Attempts to seal given block. Returns `SealedBlock` on success and the same block in case of error.
|
||||||
|
fn try_seal(&self, block: ClosedBlock, seal: Vec<Bytes>) -> Result<SealedBlock, ClosedBlock>;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
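Note: `best_block_header` in the `BlockChainClient` trait above is a provided method built from the required ones (`chain_info` plus `block_header`), so every implementor, including the test client, gets it for free. A tiny sketch of that pattern with simplified stand-in types:

```rust
// Sketch of the provided-method pattern used by BlockChainClient above.
trait ChainClient {
	fn best_block_hash(&self) -> u64;                  // stands in for chain_info()
	fn block_header(&self, hash: u64) -> Option<String>;

	fn best_block_header(&self) -> String {
		// default implementation shared by every client, including test clients
		self.block_header(self.best_block_hash()).unwrap()
	}
}

struct Dummy;
impl ChainClient for Dummy {
	fn best_block_hash(&self) -> u64 { 7 }
	fn block_header(&self, hash: u64) -> Option<String> { Some(format!("header-{}", hash)) }
}

fn main() {
	assert_eq!(Dummy.best_block_header(), "header-7");
}
```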
336 ethcore/src/client/test_client.rs Normal file
@ -0,0 +1,336 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//! Test client.
|
||||||
|
|
||||||
|
use util::*;
|
||||||
|
use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action};
|
||||||
|
use blockchain::TreeRoute;
|
||||||
|
use client::{BlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId};
|
||||||
|
use header::{Header as BlockHeader, BlockNumber};
|
||||||
|
use filter::Filter;
|
||||||
|
use log_entry::LocalizedLogEntry;
|
||||||
|
use receipt::Receipt;
|
||||||
|
use error::{ImportResult};
|
||||||
|
use block_queue::BlockQueueInfo;
|
||||||
|
use block::{SealedBlock, ClosedBlock};
|
||||||
|
|
||||||
|
/// Test client.
|
||||||
|
pub struct TestBlockChainClient {
|
||||||
|
/// Blocks.
|
||||||
|
pub blocks: RwLock<HashMap<H256, Bytes>>,
|
||||||
|
/// Mapping of numbers to hashes.
|
||||||
|
pub numbers: RwLock<HashMap<usize, H256>>,
|
||||||
|
/// Genesis block hash.
|
||||||
|
pub genesis_hash: H256,
|
||||||
|
/// Last block hash.
|
||||||
|
pub last_hash: RwLock<H256>,
|
||||||
|
/// Difficulty.
|
||||||
|
pub difficulty: RwLock<U256>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
/// Used for generating test client blocks.
|
||||||
|
pub enum EachBlockWith {
|
||||||
|
/// Plain block.
|
||||||
|
Nothing,
|
||||||
|
/// Block with an uncle.
|
||||||
|
Uncle,
|
||||||
|
/// Block with a transaction.
|
||||||
|
Transaction,
|
||||||
|
/// Block with an uncle and transaction.
|
||||||
|
UncleAndTransaction
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TestBlockChainClient {
|
||||||
|
/// Creates new test client.
|
||||||
|
pub fn new() -> TestBlockChainClient {
|
||||||
|
|
||||||
|
let mut client = TestBlockChainClient {
|
||||||
|
blocks: RwLock::new(HashMap::new()),
|
||||||
|
numbers: RwLock::new(HashMap::new()),
|
||||||
|
genesis_hash: H256::new(),
|
||||||
|
last_hash: RwLock::new(H256::new()),
|
||||||
|
difficulty: RwLock::new(From::from(0)),
|
||||||
|
};
|
||||||
|
client.add_blocks(1, EachBlockWith::Nothing); // add genesis block
|
||||||
|
client.genesis_hash = client.last_hash.read().unwrap().clone();
|
||||||
|
client
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add blocks to test client.
|
||||||
|
pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) {
|
||||||
|
let len = self.numbers.read().unwrap().len();
|
||||||
|
for n in len..(len + count) {
|
||||||
|
let mut header = BlockHeader::new();
|
||||||
|
header.difficulty = From::from(n);
|
||||||
|
header.parent_hash = self.last_hash.read().unwrap().clone();
|
||||||
|
header.number = n as BlockNumber;
|
||||||
|
let uncles = match with {
|
||||||
|
EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => {
|
||||||
|
let mut uncles = RlpStream::new_list(1);
|
||||||
|
let mut uncle_header = BlockHeader::new();
|
||||||
|
uncle_header.difficulty = From::from(n);
|
||||||
|
uncle_header.parent_hash = self.last_hash.read().unwrap().clone();
|
||||||
|
uncle_header.number = n as BlockNumber;
|
||||||
|
uncles.append(&uncle_header);
|
||||||
|
header.uncles_hash = uncles.as_raw().sha3();
|
||||||
|
uncles
|
||||||
|
},
|
||||||
|
_ => RlpStream::new_list(0)
|
||||||
|
};
|
||||||
|
let txs = match with {
|
||||||
|
EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => {
|
||||||
|
let mut txs = RlpStream::new_list(1);
|
||||||
|
let keypair = KeyPair::create().unwrap();
|
||||||
|
let tx = Transaction {
|
||||||
|
action: Action::Create,
|
||||||
|
value: U256::from(100),
|
||||||
|
data: "3331600055".from_hex().unwrap(),
|
||||||
|
gas: U256::from(100_000),
|
||||||
|
gas_price: U256::one(),
|
||||||
|
nonce: U256::zero()
|
||||||
|
};
|
||||||
|
let signed_tx = tx.sign(&keypair.secret());
|
||||||
|
txs.append(&signed_tx);
|
||||||
|
txs.out()
|
||||||
|
},
|
||||||
|
_ => rlp::NULL_RLP.to_vec()
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut rlp = RlpStream::new_list(3);
|
||||||
|
rlp.append(&header);
|
||||||
|
rlp.append_raw(&txs, 1);
|
||||||
|
rlp.append_raw(uncles.as_raw(), 1);
|
||||||
|
self.import_block(rlp.as_raw().to_vec()).unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// TODO:
|
||||||
|
pub fn corrupt_block(&mut self, n: BlockNumber) {
|
||||||
|
let hash = self.block_hash(BlockId::Number(n)).unwrap();
|
||||||
|
let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap());
|
||||||
|
header.parent_hash = H256::new();
|
||||||
|
let mut rlp = RlpStream::new_list(3);
|
||||||
|
rlp.append(&header);
|
||||||
|
rlp.append_raw(&rlp::NULL_RLP, 1);
|
||||||
|
rlp.append_raw(&rlp::NULL_RLP, 1);
|
||||||
|
self.blocks.write().unwrap().insert(hash, rlp.out());
|
||||||
|
}
|
||||||
|
|
||||||
|
/// TODO:
|
||||||
|
pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 {
|
||||||
|
let blocks_read = self.numbers.read().unwrap();
|
||||||
|
let index = blocks_read.len() - delta;
|
||||||
|
blocks_read[&index].clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_hash(&self, id: BlockId) -> Option<H256> {
|
||||||
|
match id {
|
||||||
|
BlockId::Hash(hash) => Some(hash),
|
||||||
|
BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(),
|
||||||
|
BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(),
|
||||||
|
BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BlockChainClient for TestBlockChainClient {
|
||||||
|
fn block_total_difficulty(&self, _id: BlockId) -> Option<U256> {
|
||||||
|
Some(U256::zero())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_hash(&self, _id: BlockId) -> Option<H256> {
|
||||||
|
unimplemented!();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn nonce(&self, _address: &Address) -> U256 {
|
||||||
|
U256::zero()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn code(&self, _address: &Address) -> Option<Bytes> {
|
||||||
|
unimplemented!();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn transaction(&self, _id: TransactionId) -> Option<LocalizedTransaction> {
|
||||||
|
unimplemented!();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option<Vec<BlockNumber>> {
|
||||||
|
unimplemented!();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn logs(&self, _filter: Filter) -> Vec<LocalizedLogEntry> {
|
||||||
|
unimplemented!();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn prepare_sealing(&self, _author: Address, _extra_data: Bytes, _transactions: Vec<SignedTransaction>) -> Option<ClosedBlock> {
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn try_seal(&self, _block: ClosedBlock, _seal: Vec<Bytes>) -> Result<SealedBlock, ClosedBlock> {
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_header(&self, id: BlockId) -> Option<Bytes> {
|
||||||
|
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_body(&self, id: BlockId) -> Option<Bytes> {
|
||||||
|
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| {
|
||||||
|
let mut stream = RlpStream::new_list(2);
|
||||||
|
stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1);
|
||||||
|
stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1);
|
||||||
|
stream.out()
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block(&self, id: BlockId) -> Option<Bytes> {
|
||||||
|
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_status(&self, id: BlockId) -> BlockStatus {
|
||||||
|
match id {
|
||||||
|
BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain,
|
||||||
|
BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain,
|
||||||
|
_ => BlockStatus::Unknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// works only if blocks are one after another 1 -> 2 -> 3
|
||||||
|
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
|
||||||
|
Some(TreeRoute {
|
||||||
|
ancestor: H256::new(),
|
||||||
|
index: 0,
|
||||||
|
blocks: {
|
||||||
|
let numbers_read = self.numbers.read().unwrap();
|
||||||
|
let mut adding = false;
|
||||||
|
|
||||||
|
let mut blocks = Vec::new();
|
||||||
|
for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) {
|
||||||
|
if hash == to {
|
||||||
|
if adding {
|
||||||
|
blocks.push(hash.clone());
|
||||||
|
}
|
||||||
|
adding = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if hash == from {
|
||||||
|
adding = true;
|
||||||
|
}
|
||||||
|
if adding {
|
||||||
|
blocks.push(hash.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if adding { Vec::new() } else { blocks }
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: returns just hashes instead of node state rlp(?)
|
||||||
|
fn state_data(&self, hash: &H256) -> Option<Bytes> {
|
||||||
|
// starts with 'f' ?
|
||||||
|
if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
|
||||||
|
let mut rlp = RlpStream::new();
|
||||||
|
rlp.append(&hash.clone());
|
||||||
|
return Some(rlp.out());
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
|
||||||
|
// starts with 'f' ?
|
||||||
|
if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
|
||||||
|
let receipt = Receipt::new(
|
||||||
|
H256::zero(),
|
||||||
|
U256::zero(),
|
||||||
|
vec![]);
|
||||||
|
let mut rlp = RlpStream::new();
|
||||||
|
rlp.append(&receipt);
|
||||||
|
return Some(rlp.out());
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
fn import_block(&self, b: Bytes) -> ImportResult {
|
||||||
|
let header = Rlp::new(&b).val_at::<BlockHeader>(0);
|
||||||
|
let h = header.hash();
|
||||||
|
let number: usize = header.number as usize;
|
||||||
|
if number > self.blocks.read().unwrap().len() {
|
||||||
|
panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number);
|
||||||
|
}
|
||||||
|
if number > 0 {
|
||||||
|
match self.blocks.read().unwrap().get(&header.parent_hash) {
|
||||||
|
Some(parent) => {
|
||||||
|
let parent = Rlp::new(parent).val_at::<BlockHeader>(0);
|
||||||
|
if parent.number != (header.number - 1) {
|
||||||
|
panic!("Unexpected block parent");
|
||||||
|
}
|
||||||
|
},
|
||||||
|
None => {
|
||||||
|
panic!("Unknown block parent {:?} for block {}", header.parent_hash, number);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let len = self.numbers.read().unwrap().len();
|
||||||
|
if number == len {
|
||||||
|
{
|
||||||
|
let mut difficulty = self.difficulty.write().unwrap();
|
||||||
|
*difficulty.deref_mut() = *difficulty.deref() + header.difficulty;
|
||||||
|
}
|
||||||
|
mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone());
|
||||||
|
self.blocks.write().unwrap().insert(h.clone(), b);
|
||||||
|
self.numbers.write().unwrap().insert(number, h.clone());
|
||||||
|
let mut parent_hash = header.parent_hash;
|
||||||
|
if number > 0 {
|
||||||
|
let mut n = number - 1;
|
||||||
|
while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash {
|
||||||
|
*self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone();
|
||||||
|
n -= 1;
|
||||||
|
parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::<BlockHeader>(0).parent_hash;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
self.blocks.write().unwrap().insert(h.clone(), b.to_vec());
|
||||||
|
}
|
||||||
|
Ok(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn queue_info(&self) -> BlockQueueInfo {
|
||||||
|
BlockQueueInfo {
|
||||||
|
verified_queue_size: 0,
|
||||||
|
unverified_queue_size: 0,
|
||||||
|
verifying_queue_size: 0,
|
||||||
|
max_queue_size: 0,
|
||||||
|
max_mem_use: 0,
|
||||||
|
mem_used: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn clear_queue(&self) {
|
||||||
|
}
|
||||||
|
|
||||||
|
fn chain_info(&self) -> BlockChainInfo {
|
||||||
|
BlockChainInfo {
|
||||||
|
total_difficulty: *self.difficulty.read().unwrap(),
|
||||||
|
pending_total_difficulty: *self.difficulty.read().unwrap(),
|
||||||
|
genesis_hash: self.genesis_hash.clone(),
|
||||||
|
best_block_hash: self.last_hash.read().unwrap().clone(),
|
||||||
|
best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
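Note: the new `TestBlockChainClient` lets sync and RPC tests build a fake chain in memory (`new()` adds a genesis block, `add_blocks` appends more, optionally with uncles or transactions). A heavily simplified, self-contained mock showing the intended testing pattern; it is not the real type, which stores RLP-encoded blocks:

```rust
// Stand-alone sketch of the testing pattern TestBlockChainClient enables.
struct MockClient { hashes: Vec<String> }

enum EachBlockWith { Nothing, Uncle, Transaction }

impl MockClient {
	fn new() -> Self { MockClient { hashes: vec!["genesis".into()] } }

	fn add_blocks(&mut self, count: usize, _with: EachBlockWith) {
		for _ in 0..count {
			let n = self.hashes.len();
			self.hashes.push(format!("block-{}", n)); // fake hash derived from the height
		}
	}

	fn best_block_number(&self) -> u64 { (self.hashes.len() - 1) as u64 }
}

fn main() {
	let mut client = MockClient::new();
	client.add_blocks(3, EachBlockWith::Nothing);
	assert_eq!(client.best_block_number(), 3);
}
```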
@ -202,7 +202,7 @@ impl Engine for Ethash {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self
|
#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self
|
||||||
impl Ethash {
|
impl Ethash {
|
||||||
fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 {
|
fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 {
|
||||||
const EXP_DIFF_PERIOD: u64 = 100000;
|
const EXP_DIFF_PERIOD: u64 = 100000;
|
||||||
|
@ -243,7 +243,7 @@ struct CodeReader<'a> {
|
|||||||
code: &'a Bytes
|
code: &'a Bytes
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))]
|
#[cfg_attr(feature="dev", allow(len_without_is_empty))]
|
||||||
impl<'a> CodeReader<'a> {
|
impl<'a> CodeReader<'a> {
|
||||||
/// Get `no_of_bytes` from code and convert to U256. Move PC
|
/// Get `no_of_bytes` from code and convert to U256. Move PC
|
||||||
fn read(&mut self, no_of_bytes: usize) -> U256 {
|
fn read(&mut self, no_of_bytes: usize) -> U256 {
|
||||||
@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
|
#[cfg_attr(feature="dev", allow(enum_variant_names))]
|
||||||
enum InstructionCost {
|
enum InstructionCost {
|
||||||
Gas(U256),
|
Gas(U256),
|
||||||
GasMem(U256, U256),
|
GasMem(U256, U256),
|
||||||
@ -347,7 +347,7 @@ impl evm::Evm for Interpreter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Interpreter {
|
impl Interpreter {
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
|
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
|
||||||
fn get_gas_cost_mem(&self,
|
fn get_gas_cost_mem(&self,
|
||||||
ext: &evm::Ext,
|
ext: &evm::Ext,
|
||||||
instruction: Instruction,
|
instruction: Instruction,
|
||||||
|
@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> {
|
|||||||
self.state.code(address).unwrap_or_else(|| vec![])
|
self.state.code(address).unwrap_or_else(|| vec![])
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
|
#[cfg_attr(feature="dev", allow(match_ref_pats))]
|
||||||
fn ret(&mut self, gas: &U256, data: &[u8]) -> Result<U256, evm::Error> {
|
fn ret(&mut self, gas: &U256, data: &[u8]) -> Result<U256, evm::Error> {
|
||||||
match &mut self.output {
|
match &mut self.output {
|
||||||
&mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe {
|
&mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe {
|
||||||
|
@ -15,16 +15,16 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
#![warn(missing_docs)]
|
#![warn(missing_docs)]
|
||||||
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
|
#![cfg_attr(feature="dev", feature(plugin))]
|
||||||
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
|
#![cfg_attr(feature="dev", plugin(clippy))]
|
||||||
|
|
||||||
// Clippy config
|
// Clippy config
|
||||||
// TODO [todr] not really sure
|
// TODO [todr] not really sure
|
||||||
#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
|
#![cfg_attr(feature="dev", allow(needless_range_loop))]
|
||||||
// Shorter than if-else
|
// Shorter than if-else
|
||||||
#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
|
#![cfg_attr(feature="dev", allow(match_bool))]
|
||||||
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
|
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
|
||||||
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]
|
#![cfg_attr(feature="dev", allow(clone_on_copy))]
|
||||||
|
|
||||||
//! Ethcore library
|
//! Ethcore library
|
||||||
//!
|
//!
|
||||||
@ -86,6 +86,7 @@ extern crate crossbeam;
|
|||||||
#[cfg(feature = "jit" )] extern crate evmjit;
|
#[cfg(feature = "jit" )] extern crate evmjit;
|
||||||
|
|
||||||
pub mod block;
|
pub mod block;
|
||||||
|
pub mod block_queue;
|
||||||
pub mod client;
|
pub mod client;
|
||||||
pub mod error;
|
pub mod error;
|
||||||
pub mod ethereum;
|
pub mod ethereum;
|
||||||
@ -119,7 +120,6 @@ mod substate;
|
|||||||
mod executive;
|
mod executive;
|
||||||
mod externalities;
|
mod externalities;
|
||||||
mod verification;
|
mod verification;
|
||||||
mod block_queue;
|
|
||||||
mod blockchain;
|
mod blockchain;
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
@ -119,7 +119,7 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
|
#[cfg_attr(feature="dev", allow(single_match))]
|
||||||
fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
|
fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
|
||||||
if let UserMessage(ref message) = *net_message {
|
if let UserMessage(ref message) = *net_message {
|
||||||
match *message {
|
match *message {
|
||||||
|
@ -99,7 +99,7 @@ pub struct Spec {
|
|||||||
genesis_state: PodState,
|
genesis_state: PodState,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
|
#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
|
||||||
impl Spec {
|
impl Spec {
|
||||||
/// Convert this object into a boxed Engine of the right underlying type.
|
/// Convert this object into a boxed Engine of the right underlying type.
|
||||||
// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
|
// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
|
||||||
|
@ -224,7 +224,7 @@ impl State {
|
|||||||
|
|
||||||
/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
|
/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
|
||||||
/// `accounts` is mutable because we may need to commit the code or storage and record that.
|
/// `accounts` is mutable because we may need to commit the code or storage and record that.
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
|
#[cfg_attr(feature="dev", allow(match_ref_pats))]
|
||||||
pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap<Address, Option<Account>>) {
|
pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap<Address, Option<Account>>) {
|
||||||
// first, commit the sub trees.
|
// first, commit the sub trees.
|
||||||
// TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?
|
// TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?
|
||||||
|
@ -80,7 +80,7 @@ impl Transaction {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl FromJson for SignedTransaction {
|
impl FromJson for SignedTransaction {
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))]
|
#[cfg_attr(feature="dev", allow(single_char_pattern))]
|
||||||
fn from_json(json: &Json) -> SignedTransaction {
|
fn from_json(json: &Json) -> SignedTransaction {
|
||||||
let t = Transaction {
|
let t = Transaction {
|
||||||
nonce: xjson!(&json["nonce"]),
|
nonce: xjson!(&json["nonce"]),
|
||||||
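Note: throughout the hunks above, lint attributes change from `all(nightly, feature="dev")` to plain `feature="dev"`, so the `dev` feature alone decides whether the clippy plugin and its lint tweaks apply (the nightly-detecting build script was removed earlier in this diff). A sketch of that gating pattern; it compiles on stable when the feature is off and only needs nightly when `dev` is enabled:

```rust
// Crate-level gating as used above: built with `--features dev`, the clippy
// plugin and its allow() tweaks are enabled; on a default build they vanish.
#![cfg_attr(feature = "dev", feature(plugin))]
#![cfg_attr(feature = "dev", plugin(clippy))]

// Per-item form, as in the Ethash / Interpreter impls above:
#[cfg_attr(feature = "dev", allow(wrong_self_convention))]
fn to_ethash(x: u64) -> u64 { x }

fn main() {
	println!("{}", to_ethash(1));
}
```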
13 hook.sh
@@ -1,3 +1,12 @@
 #!/bin/sh
-echo "#!/bin/sh\ncargo build --features dev-clippy && cargo test --no-run -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer --features dev-clippy" > ./.git/hooks/pre-push
-chmod +x ./.git/hooks/pre-push
+FILE=./.git/hooks/pre-push
+echo "#!/bin/sh\n" > $FILE
+# Exit on any error
+echo "set -e" >> $FILE
+# Run release build
+echo "cargo build --release --features dev" >> $FILE
+# Build tests
+echo "cargo test --no-run --features dev \\" >> $FILE
+echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" >> $FILE
+echo "" >> $FILE
+chmod +x $FILE
@@ -21,4 +21,4 @@ clippy = { version = "0.0.44", optional = true }

 [features]
 default = []
-dev = []
+dev = ["clippy"]
|
160 parity/main.rs
@ -17,8 +17,8 @@
|
|||||||
//! Ethcore client application.
|
//! Ethcore client application.
|
||||||
|
|
||||||
#![warn(missing_docs)]
|
#![warn(missing_docs)]
|
||||||
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
|
#![cfg_attr(feature="dev", feature(plugin))]
|
||||||
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
|
#![cfg_attr(feature="dev", plugin(clippy))]
|
||||||
extern crate docopt;
|
extern crate docopt;
|
||||||
extern crate rustc_serialize;
|
extern crate rustc_serialize;
|
||||||
extern crate ethcore_util as util;
|
extern crate ethcore_util as util;
|
||||||
@ -38,7 +38,7 @@ extern crate rpassword;
|
|||||||
#[cfg(feature = "rpc")]
|
#[cfg(feature = "rpc")]
|
||||||
extern crate ethcore_rpc as rpc;
|
extern crate ethcore_rpc as rpc;
|
||||||
|
|
||||||
use std::net::{SocketAddr};
|
use std::net::{SocketAddr, IpAddr};
|
||||||
use std::env;
|
use std::env;
|
||||||
use std::process::exit;
|
use std::process::exit;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
@ -55,6 +55,7 @@ use ethminer::{Miner, MinerService};
|
|||||||
use docopt::Docopt;
|
use docopt::Docopt;
|
||||||
use daemonize::Daemonize;
|
use daemonize::Daemonize;
|
||||||
use number_prefix::{binary_prefix, Standalone, Prefixed};
|
use number_prefix::{binary_prefix, Standalone, Prefixed};
|
||||||
|
use util::keys::store::*;
|
||||||
|
|
||||||
fn die_with_message(msg: &str) -> ! {
|
fn die_with_message(msg: &str) -> ! {
|
||||||
println!("ERROR: {}", msg);
|
println!("ERROR: {}", msg);
|
||||||
@ -72,28 +73,26 @@ Parity. Ethereum Client.
|
|||||||
Copyright 2015, 2016 Ethcore (UK) Limited
|
Copyright 2015, 2016 Ethcore (UK) Limited
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
parity daemon <pid-file> [options] [ --no-bootstrap | <enode>... ]
|
parity daemon <pid-file> [options]
|
||||||
parity account (new | list)
|
parity account (new | list)
|
||||||
parity [options] [ --no-bootstrap | <enode>... ]
|
parity [options]
|
||||||
|
|
||||||
Protocol Options:
|
Protocol Options:
|
||||||
--chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file
|
--chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file
|
||||||
or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead].
|
or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead].
|
||||||
--testnet Equivalent to --chain testnet (geth-compatible).
|
--db-path PATH Specify the database & configuration directory path [default: $HOME/.parity]
|
||||||
--networkid INDEX Override the network identifier from the chain we are on.
|
|
||||||
--pruning Client should prune the state/storage trie.
|
--pruning Client should prune the state/storage trie.
|
||||||
-d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity]
|
|
||||||
--keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
|
--keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
|
||||||
--identity NAME Specify your node's name.
|
--identity NAME Specify your node's name.
|
||||||
|
|
||||||
Networking Options:
|
Networking Options:
|
||||||
--no-bootstrap Don't bother trying to connect to any nodes initially.
|
--port PORT Override the port on which the node should listen [default: 30303].
|
||||||
--listen-address URL Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304].
|
|
||||||
--public-address URL Specify the IP/port on which peers may connect.
|
|
||||||
--address URL Equivalent to --listen-address URL --public-address URL.
|
|
||||||
--peers NUM Try to maintain that many peers [default: 25].
|
--peers NUM Try to maintain that many peers [default: 25].
|
||||||
|
--nat METHOD Specify method to use for determining public address. Must be one of: any, none,
|
||||||
|
upnp, extip:(IP) [default: any].
|
||||||
|
--bootnodes NODES Specify additional comma-separated bootnodes.
|
||||||
|
--no-bootstrap Don't bother trying to connect to standard bootnodes.
|
||||||
--no-discovery Disable new peer discovery.
|
--no-discovery Disable new peer discovery.
|
||||||
--no-upnp Disable trying to figure out the correct public adderss over UPnP.
|
|
||||||
--node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation.
|
--node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation.
|
||||||
|
|
||||||
API and Console Options:
|
API and Console Options:
|
||||||
@ -103,17 +102,12 @@ API and Console Options:
|
|||||||
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null].
|
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null].
|
||||||
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited
|
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited
|
||||||
list of API name. Possible name are web3, eth and net. [default: web3,eth,net].
|
list of API name. Possible name are web3, eth and net. [default: web3,eth,net].
|
||||||
--rpc Equivalent to --jsonrpc (geth-compatible).
|
|
||||||
--rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible).
|
|
||||||
--rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible).
|
|
||||||
--rpcapi APIS Equivalent to --jsonrpc-apis APIS (geth-compatible).
|
|
||||||
--rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible).
|
|
||||||
|
|
||||||
Sealing/Mining Options:
|
Sealing/Mining Options:
|
||||||
--gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 20000000000].
|
--gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 20000000000].
|
||||||
--author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards
|
--author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards
|
||||||
from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
|
from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
|
||||||
--extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters.
|
--extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters.
|
||||||
|
|
||||||
Memory Footprint Options:
|
Memory Footprint Options:
|
||||||
--cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384].
|
--cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384].
|
||||||
@ -122,6 +116,21 @@ Memory Footprint Options:
|
|||||||
--cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with
|
--cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with
|
||||||
other cache options (geth-compatible).
|
other cache options (geth-compatible).
|
||||||
|
|
||||||
|
Geth-Compatibility Options
|
||||||
|
--datadir PATH Equivalent to --db-path PATH.
|
||||||
|
--testnet Equivalent to --chain testnet.
|
||||||
|
--networkid INDEX Override the network identifier from the chain we are on.
|
||||||
|
--rpc Equivalent to --jsonrpc.
|
||||||
|
--rpcaddr HOST Equivalent to --jsonrpc-addr HOST.
|
||||||
|
--rpcport PORT Equivalent to --jsonrpc-port PORT.
|
||||||
|
--rpcapi APIS Equivalent to --jsonrpc-apis APIS.
|
||||||
|
--rpccorsdomain URL Equivalent to --jsonrpc-cors URL.
|
||||||
|
--maxpeers COUNT Equivalent to --peers COUNT.
|
||||||
|
--nodekey KEY Equivalent to --node-key KEY.
|
||||||
|
--nodiscover Equivalent to --no-discovery.
|
||||||
|
--etherbase ADDRESS Equivalent to --author ADDRESS.
|
||||||
|
--extradata STRING Equivalent to --extra-data STRING.
|
||||||
|
|
||||||
Miscellaneous Options:
|
Miscellaneous Options:
|
||||||
-l --logging LOGGING Specify the logging level.
|
-l --logging LOGGING Specify the logging level.
|
||||||
-v --version Show information about version.
|
-v --version Show information about version.
|
||||||
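Note: the new Geth-Compatibility section maps each geth-style alias onto an existing Parity flag; later hunks resolve them with a simple fallback, e.g. `flag_maxpeers.unwrap_or(flag_peers)`. A minimal sketch of that resolution with an illustrative pair of flags:

```rust
// Resolution pattern for geth-style aliases: prefer the alias when given,
// otherwise fall back to the native flag. Field names mirror the Args struct.
struct Args {
	flag_peers: usize,            // native --peers (has a default)
	flag_maxpeers: Option<usize>, // geth alias --maxpeers
}

fn ideal_peers(args: &Args) -> u32 {
	args.flag_maxpeers.unwrap_or(args.flag_peers) as u32
}

fn main() {
	assert_eq!(ideal_peers(&Args { flag_peers: 25, flag_maxpeers: None }), 25);
	assert_eq!(ideal_peers(&Args { flag_peers: 25, flag_maxpeers: Some(50) }), 50);
}
```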
@ -135,22 +144,18 @@ struct Args {
|
|||||||
cmd_new: bool,
|
cmd_new: bool,
|
||||||
cmd_list: bool,
|
cmd_list: bool,
|
||||||
arg_pid_file: String,
|
arg_pid_file: String,
|
||||||
arg_enode: Vec<String>,
|
|
||||||
flag_chain: String,
|
flag_chain: String,
|
||||||
flag_testnet: bool,
|
flag_db_path: String,
|
||||||
flag_datadir: String,
|
|
||||||
flag_networkid: Option<String>,
|
|
||||||
flag_identity: String,
|
flag_identity: String,
|
||||||
flag_cache: Option<usize>,
|
flag_cache: Option<usize>,
|
||||||
flag_keys_path: String,
|
flag_keys_path: String,
|
||||||
|
flag_bootnodes: Option<String>,
|
||||||
flag_pruning: bool,
|
flag_pruning: bool,
|
||||||
flag_no_bootstrap: bool,
|
flag_no_bootstrap: bool,
|
||||||
flag_listen_address: String,
|
flag_port: u16,
|
||||||
flag_public_address: Option<String>,
|
|
||||||
flag_address: Option<String>,
|
|
||||||
flag_peers: usize,
|
flag_peers: usize,
|
||||||
flag_no_discovery: bool,
|
flag_no_discovery: bool,
|
||||||
flag_no_upnp: bool,
|
flag_nat: String,
|
||||||
flag_node_key: Option<String>,
|
flag_node_key: Option<String>,
|
||||||
flag_cache_pref_size: usize,
|
flag_cache_pref_size: usize,
|
||||||
flag_cache_max_size: usize,
|
flag_cache_max_size: usize,
|
||||||
@ -160,16 +165,24 @@ struct Args {
|
|||||||
flag_jsonrpc_port: u16,
|
flag_jsonrpc_port: u16,
|
||||||
flag_jsonrpc_cors: String,
|
flag_jsonrpc_cors: String,
|
||||||
flag_jsonrpc_apis: String,
|
flag_jsonrpc_apis: String,
|
||||||
|
flag_logging: Option<String>,
|
||||||
|
flag_version: bool,
|
||||||
|
// geth-compatibility...
|
||||||
|
flag_nodekey: Option<String>,
|
||||||
|
flag_nodiscover: bool,
|
||||||
|
flag_maxpeers: Option<usize>,
|
||||||
|
flag_author: String,
|
||||||
|
flag_extra_data: Option<String>,
|
||||||
|
flag_datadir: Option<String>,
|
||||||
|
flag_extradata: Option<String>,
|
||||||
|
flag_etherbase: Option<String>,
|
||||||
flag_rpc: bool,
|
flag_rpc: bool,
|
||||||
flag_rpcaddr: Option<String>,
|
flag_rpcaddr: Option<String>,
|
||||||
flag_rpcport: Option<u16>,
|
flag_rpcport: Option<u16>,
|
||||||
flag_rpccorsdomain: Option<String>,
|
flag_rpccorsdomain: Option<String>,
|
||||||
flag_rpcapi: Option<String>,
|
flag_rpcapi: Option<String>,
|
||||||
flag_logging: Option<String>,
|
flag_testnet: bool,
|
||||||
flag_version: bool,
|
flag_networkid: Option<String>,
|
||||||
flag_gasprice: String,
|
|
||||||
flag_author: String,
|
|
||||||
flag_extra_data: Option<String>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn setup_log(init: &Option<String>) {
|
fn setup_log(init: &Option<String>) {
|
||||||
@ -199,7 +212,9 @@ fn setup_log(init: &Option<String>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "rpc")]
|
#[cfg(feature = "rpc")]
|
||||||
fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, miner: Arc<Miner>, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option<Arc<PanicHandler>> {
|
fn setup_rpc_server(
|
||||||
|
client: Arc<Client>, sync: Arc<EthSync>, secret_store: Arc<AccountService>, miner: Arc<Miner>,
|
||||||
|
url: &str, cors_domain: &str, apis: Vec<&str>) -> Option<Arc<PanicHandler>> {
|
||||||
use rpc::v1::*;
|
use rpc::v1::*;
|
||||||
|
|
||||||
let server = rpc::RpcServer::new();
|
let server = rpc::RpcServer::new();
|
||||||
@ -208,7 +223,7 @@ fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, miner: Arc<Miner>,
|
|||||||
"web3" => server.add_delegate(Web3Client::new().to_delegate()),
|
"web3" => server.add_delegate(Web3Client::new().to_delegate()),
|
||||||
"net" => server.add_delegate(NetClient::new(&sync).to_delegate()),
|
"net" => server.add_delegate(NetClient::new(&sync).to_delegate()),
|
||||||
"eth" => {
|
"eth" => {
|
||||||
server.add_delegate(EthClient::new(&client, &sync, &miner).to_delegate());
|
server.add_delegate(EthClient::new(&client, &sync, &secret_store, &miner).to_delegate());
|
||||||
server.add_delegate(EthFilterClient::new(&client).to_delegate());
|
server.add_delegate(EthFilterClient::new(&client).to_delegate());
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
@ -220,7 +235,9 @@ fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, miner: Arc<Miner>,
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(feature = "rpc"))]
|
#[cfg(not(feature = "rpc"))]
|
||||||
fn setup_rpc_server(_client: Arc<Client>, _sync: Arc<EthSync>, _url: &str) -> Option<Arc<PanicHandler>> {
|
fn setup_rpc_server(
|
||||||
|
_client: Arc<Client>, _sync: Arc<EthSync>, _secret_store: Arc<AccountService>, _miner: Arc<Miner>,
|
||||||
|
_url: &str, _cors_domain: &str, _apis: Vec<&str>) -> Option<Arc<PanicHandler>> {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -249,12 +266,14 @@ impl Configuration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn path(&self) -> String {
|
fn path(&self) -> String {
|
||||||
self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
|
let d = self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path);
|
||||||
|
d.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn author(&self) -> Address {
|
fn author(&self) -> Address {
|
||||||
Address::from_str(&self.args.flag_author).unwrap_or_else(|_| {
|
let d = self.args.flag_etherbase.as_ref().unwrap_or(&self.args.flag_author);
|
||||||
die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)
|
Address::from_str(d).unwrap_or_else(|_| {
|
||||||
|
die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", d)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -265,7 +284,7 @@ impl Configuration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn extra_data(&self) -> Bytes {
|
fn extra_data(&self) -> Bytes {
|
||||||
match self.args.flag_extra_data {
|
match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) {
|
||||||
Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
|
Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
|
||||||
None => version_data(),
|
None => version_data(),
|
||||||
Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
|
Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
|
||||||
@ -299,53 +318,40 @@ impl Configuration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn init_nodes(&self, spec: &Spec) -> Vec<String> {
|
fn init_nodes(&self, spec: &Spec) -> Vec<String> {
|
||||||
if self.args.flag_no_bootstrap { Vec::new() } else {
|
let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() };
|
||||||
match self.args.arg_enode.len() {
|
if let Some(ref x) = self.args.flag_bootnodes {
|
||||||
0 => spec.nodes().clone(),
|
r.extend(x.split(',').map(|s| {
|
||||||
_ => self.args.arg_enode.iter().map(|s| Self::normalize_enode(s).unwrap_or_else(|| {
|
Self::normalize_enode(s).unwrap_or_else(|| {
|
||||||
die!("{}: Invalid node address format given for a boot node.", s)
|
die!("{}: Invalid node address format given for a boot node.", s)
|
||||||
})).collect(),
|
})
|
||||||
}
|
}));
|
||||||
}
|
}
|
||||||
|
r
|
||||||
}
|
}
|
||||||
|
|
||||||
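Note: `init_nodes` above now starts from the spec's bootnodes (unless `--no-bootstrap`) and appends any `--bootnodes` entries after validating each one. Sketched below with plain strings standing in for enode URLs and a trivial validator in place of `normalize_enode`:

```rust
// Same shape as init_nodes() above; error handling reduced to a panic,
// mirroring the die!() macro in the original.
fn init_nodes(spec_nodes: &[String], no_bootstrap: bool, bootnodes: Option<&str>) -> Vec<String> {
	let mut r = if no_bootstrap { Vec::new() } else { spec_nodes.to_vec() };
	if let Some(x) = bootnodes {
		r.extend(x.split(',').map(|s| {
			if s.is_empty() { panic!("{}: Invalid node address format given for a boot node.", s) }
			s.to_string()
		}));
	}
	r
}

fn main() {
	let spec = vec!["enode-a".to_string()];
	let all = init_nodes(&spec, false, Some("enode-b,enode-c"));
	assert_eq!(all.len(), 3);
}
```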
#[cfg_attr(all(nightly, feature="dev"), allow(useless_format))]
|
#[cfg_attr(feature="dev", allow(useless_format))]
|
||||||
fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
|
fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
|
||||||
let mut listen_address = None;
|
let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port));
|
||||||
let mut public_address = None;
|
let public_address = if self.args.flag_nat.starts_with("extip:") {
|
||||||
|
let host = &self.args.flag_nat[6..];
|
||||||
if let Some(ref a) = self.args.flag_address {
|
let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host));
|
||||||
public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| {
|
Some(SocketAddr::new(host, self.args.flag_port))
|
||||||
die!("{}: Invalid listen/public address given with --address", a)
|
} else {
|
||||||
}));
|
listen_address.clone()
|
||||||
listen_address = public_address;
|
};
|
||||||
}
|
|
||||||
if listen_address.is_none() {
|
|
||||||
listen_address = Some(SocketAddr::from_str(self.args.flag_listen_address.as_ref()).unwrap_or_else(|_| {
|
|
||||||
die!("{}: Invalid listen/public address given with --listen-address", self.args.flag_listen_address)
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
if let Some(ref a) = self.args.flag_public_address {
|
|
||||||
if public_address.is_some() {
|
|
||||||
die!("Conflicting flags provided: --address and --public-address");
|
|
||||||
}
|
|
||||||
public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| {
|
|
||||||
die!("{}: Invalid listen/public address given with --public-address", a)
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
(listen_address, public_address)
|
(listen_address, public_address)
|
||||||
}
|
}
|
||||||
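Illustration (not part of the diff): net_addresses() now derives the public address from a `--nat extip:<ip>` flag instead of separate --address/--public-address flags. A std-only sketch of the extip parsing; the real code dies with a message where this sketch simply panics, and strip_prefix assumes a reasonably recent Rust toolchain.

use std::net::{IpAddr, SocketAddr};
use std::str::FromStr;

/// Strip the "extip:" prefix, parse the host and pair it with the listen port.
fn external_address(nat_flag: &str, port: u16) -> Option<SocketAddr> {
    if let Some(host) = nat_flag.strip_prefix("extip:") {
        let ip = IpAddr::from_str(host).expect("invalid host given with --nat extip:");
        Some(SocketAddr::new(ip, port))
    } else {
        None // "any"/"upnp" fall back to the listen address in the real code
    }
}

fn main() {
    println!("{:?}", external_address("extip:203.0.113.7", 30304));
    println!("{:?}", external_address("any", 30304));
}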

 	fn net_settings(&self, spec: &Spec) -> NetworkConfiguration {
 		let mut ret = NetworkConfiguration::new();
-		ret.nat_enabled = !self.args.flag_no_upnp;
+		ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp";
 		ret.boot_nodes = self.init_nodes(spec);
 		let (listen, public) = self.net_addresses();
 		ret.listen_address = listen;
 		ret.public_address = public;
 		ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3()));
-		ret.discovery_enabled = !self.args.flag_no_discovery;
-		ret.ideal_peers = self.args.flag_peers as u32;
+		ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover;
+		ret.ideal_peers = self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32;
 		let mut net_path = PathBuf::from(&self.path());
 		net_path.push("network");
 		ret.config_path = Some(net_path.to_str().unwrap().to_owned());

@@ -451,6 +457,9 @@ impl Configuration {
 		// Sync
 		let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone());

+		// Secret Store
+		let account_service = Arc::new(AccountService::new());
+
 		// Setup rpc
 		if self.args.flag_jsonrpc || self.args.flag_rpc {
 			let url = format!("{}:{}",

@@ -461,8 +470,13 @@ impl Configuration {
 			let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
 			// TODO: use this as the API list.
 			let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
-			let server_handler = setup_rpc_server(service.client(), sync.clone(), miner.clone(), &url, cors, apis.split(',').collect());
+			let server_handler = setup_rpc_server(
+				service.client(),
+				sync.clone(),
+				account_service.clone(),
+				miner.clone(),
+				&url, cors, apis.split(',').collect()
+			);
 			if let Some(handler) = server_handler {
 				panic_handler.forward_from(handler.deref());
 			}

@@ -27,9 +27,8 @@ serde_macros = { version = "0.7.0", optional = true }
 [build-dependencies]
 serde_codegen = { version = "0.7.0", optional = true }
 syntex = "0.29.0"
-rustc_version = "0.1"

 [features]
 default = ["serde_codegen"]
 nightly = ["serde_macros"]
-dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethminer/dev"]
+dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethminer/dev"]

@@ -14,10 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-extern crate rustc_version;
-
-use rustc_version::{version_meta, Channel};
-
 #[cfg(not(feature = "serde_macros"))]
 mod inner {
 	extern crate syntex;

@@ -46,7 +42,4 @@ mod inner {

 fn main() {
 	inner::main();
-	if let Channel::Nightly = version_meta().channel {
-		println!("cargo:rustc-cfg=nightly");
-	}
 }

@@ -30,23 +30,27 @@ use ethcore::views::*;
 use ethcore::ethereum::Ethash;
 use ethcore::ethereum::denominations::shannon;
 use v1::traits::{Eth, EthFilter};
-use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log};
+use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log};
 use v1::helpers::{PollFilter, PollManager};
+use util::keys::store::AccountProvider;

 /// Eth rpc implementation.
-pub struct EthClient<C, S, M>
+pub struct EthClient<C, S, A, M>
 	where C: BlockChainClient,
 		S: SyncProvider,
+		A: AccountProvider,
 		M: MinerService {
 	client: Weak<C>,
 	sync: Weak<S>,
+	accounts: Weak<A>,
 	miner: Weak<M>,
 	hashrates: RwLock<HashMap<H256, u64>>,
 }

-impl<C, S, M> EthClient<C, S, M>
+impl<C, S, A, M> EthClient<C, S, A, M>
 	where C: BlockChainClient,
 		S: SyncProvider,
+		A: AccountProvider,
 		M: MinerService {
 	/// Creates new EthClient.
 	pub fn new(client: &Arc<C>, sync: &Arc<S>, miner: &Arc<M>) -> Self {

@@ -54,6 +58,7 @@ impl<C, S, M> EthClient<C, S, M>
 			client: Arc::downgrade(client),
 			sync: Arc::downgrade(sync),
 			miner: Arc::downgrade(miner),
+			accounts: Arc::downgrade(accounts),
 			hashrates: RwLock::new(HashMap::new()),
 		}
 	}

@@ -104,10 +109,12 @@ impl<C, S, M> EthClient<C, S, M>
 	}
 }

-impl<C, S, M> Eth for EthClient<C, S, M>
+impl<C, S, A, M> Eth for EthClient<C, S, A, M>
 	where C: BlockChainClient + 'static,
 		S: SyncProvider + 'static,
+		A: AccountProvider + 'static,
 		M: MinerService + 'static {

 	fn protocol_version(&self, params: Params) -> Result<Value, Error> {
 		match params {
 			Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)),

@@ -171,7 +178,7 @@ impl<C, S, M> Eth for EthClient<C, S, M>
 		}
 	}

-	fn block_transaction_count(&self, params: Params) -> Result<Value, Error> {
+	fn block_transaction_count_by_hash(&self, params: Params) -> Result<Value, Error> {
 		from_params::<(H256,)>(params)
 			.and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) {
 				Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()),

@@ -179,6 +186,17 @@ impl<C, S, M> Eth for EthClient<C, S, M>
 			})
 	}

+	fn block_transaction_count_by_number(&self, params: Params) -> Result<Value, Error> {
+		from_params::<(BlockNumber,)>(params)
+			.and_then(|(block_number,)| match block_number {
+				BlockNumber::Pending => to_value(&take_weak!(self.sync).status().transaction_queue_pending),
+				_ => match take_weak!(self.client).block(block_number.into()) {
+					Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()),
+					None => Ok(Value::Null)
+				}
+			})
+	}
+
 	fn block_uncles_count(&self, params: Params) -> Result<Value, Error> {
 		from_params::<(H256,)>(params)
 			.and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) {

@@ -267,6 +285,24 @@ impl<C, S, M> Eth for EthClient<C, S, M>
 			to_value(&true)
 		})
 	}

+	fn send_transaction(&self, params: Params) -> Result<Value, Error> {
+		from_params::<(TransactionRequest, )>(params)
+			.and_then(|(transaction_request, )| {
+				let accounts = take_weak!(self.accounts);
+				match accounts.account_secret(&transaction_request.from) {
+					Ok(secret) => {
+						let sync = take_weak!(self.sync);
+						let (transaction, _) = transaction_request.to_eth();
+						let signed_transaction = transaction.sign(&secret);
+						let hash = signed_transaction.hash();
+						sync.insert_transaction(signed_transaction);
+						to_value(&hash)
+					},
+					Err(_) => { to_value(&U256::zero()) }
+				}
+			})
+	}
 }

 /// Eth filter rpc implementation.
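Illustration (not part of the diff): the new eth_sendTransaction path above looks up the sender's secret from the account provider, signs the transaction, hands it to the sync module's queue and returns its hash, or zero when the account is unavailable. A toy, std-only model of that control flow; every type and the hashing are stand-ins, not the real crypto.

use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy request with a sender, plus a map of "unlocked" accounts and a queue.
#[derive(Hash)]
struct TxRequest { from: String, to: Option<String>, value: u64 }

fn send_transaction(req: TxRequest, unlocked: &HashMap<String, String>, queue: &mut Vec<TxRequest>) -> u64 {
    match unlocked.get(&req.from) {
        Some(_secret) => {
            let mut h = DefaultHasher::new();
            req.hash(&mut h);
            let hash = h.finish(); // the real code returns the signed transaction's hash
            queue.push(req);       // the real code inserts it into the sync transaction queue
            hash
        }
        None => 0, // the real code answers with U256::zero() when no secret is available
    }
}

fn main() {
    let mut unlocked = HashMap::new();
    unlocked.insert("0xabc".to_owned(), "secret".to_owned());
    let mut queue = Vec::new();
    let h = send_transaction(TxRequest { from: "0xabc".into(), to: None, value: 1 }, &unlocked, &mut queue);
    println!("queued tx with toy hash 0x{:x}", h);
}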

@@ -36,10 +36,15 @@ impl<S> NetClient<S> where S: SyncProvider {

 impl<S> Net for NetClient<S> where S: SyncProvider + 'static {
 	fn version(&self, _: Params) -> Result<Value, Error> {
-		Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64))
+		Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned()))
 	}

 	fn peer_count(&self, _params: Params) -> Result<Value, Error> {
-		Ok(Value::U64(take_weak!(self.sync).status().num_peers as u64))
+		Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned()))
+	}
+
+	fn is_listening(&self, _: Params) -> Result<Value, Error> {
+		// right now (11 march 2016), we are always listening for incoming connections
+		Ok(Value::Bool(true))
 	}
 }
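Illustration (not part of the diff): net_peerCount now returns a JSON-RPC quantity, i.e. a 0x-prefixed hex string rather than a raw integer, which is why the test added further down expects "0x78" for 120 peers. A quick check of that encoding:

fn to_quantity(n: u64) -> String {
    // JSON-RPC quantities are hex strings with a 0x prefix.
    format!("0x{:x}", n)
}

fn main() {
    assert_eq!(to_quantity(120), "0x78");
    assert_eq!(to_quantity(0), "0x0");
    println!("{}", to_quantity(120));
}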

@@ -20,30 +20,28 @@ use jsonrpc_core::*;
 use v1::traits::Personal;
 use util::keys::store::*;
 use util::Address;
-use std::sync::RwLock;

 /// Account management (personal) rpc implementation.
 pub struct PersonalClient {
-	secret_store: Weak<RwLock<SecretStore>>,
+	accounts: Weak<AccountProvider>,
 }

 impl PersonalClient {
 	/// Creates new PersonalClient
-	pub fn new(store: &Arc<RwLock<SecretStore>>) -> Self {
+	pub fn new(store: &Arc<AccountProvider>) -> Self {
 		PersonalClient {
-			secret_store: Arc::downgrade(store),
+			accounts: Arc::downgrade(store),
 		}
 	}
 }

 impl Personal for PersonalClient {
 	fn accounts(&self, _: Params) -> Result<Value, Error> {
-		let store_wk = take_weak!(self.secret_store);
-		let store = store_wk.read().unwrap();
+		let store = take_weak!(self.accounts);
 		match store.accounts() {
 			Ok(account_list) => {
 				Ok(Value::Array(account_list.iter()
-					.map(|&(account, _)| Value::String(format!("{:?}", account)))
+					.map(|&account| Value::String(format!("{:?}", account)))
 					.collect::<Vec<Value>>())
 				)
 			}

@@ -54,8 +52,7 @@ impl Personal for PersonalClient {
 	fn new_account(&self, params: Params) -> Result<Value, Error> {
 		from_params::<(String, )>(params).and_then(
 			|(pass, )| {
-				let store_wk = take_weak!(self.secret_store);
-				let mut store = store_wk.write().unwrap();
+				let store = take_weak!(self.accounts);
 				match store.new_account(&pass) {
 					Ok(address) => Ok(Value::String(format!("{:?}", address))),
 					Err(_) => Err(Error::internal_error())

@@ -67,8 +64,7 @@ impl Personal for PersonalClient {
 	fn unlock_account(&self, params: Params) -> Result<Value, Error> {
 		from_params::<(Address, String, u64)>(params).and_then(
 			|(account, account_pass, _)|{
-				let store_wk = take_weak!(self.secret_store);
-				let store = store_wk.read().unwrap();
+				let store = take_weak!(self.accounts);
 				match store.unlock_account(&account, &account_pass) {
 					Ok(_) => Ok(Value::Bool(true)),
 					Err(_) => Ok(Value::Bool(false)),
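Illustration (not part of the diff): PersonalClient now talks to an AccountProvider trait object instead of holding an RwLock<SecretStore> directly; the locking moves behind the concrete AccountService introduced in util's keys/store.rs hunk further down. A simplified, std-only model of that trait-plus-interior-locking pattern:

use std::collections::HashMap;
use std::sync::RwLock;

// The RPC layer only sees a Send + Sync trait; the service hides its own locking.
pub trait AccountProviderLike: Send + Sync {
    fn accounts(&self) -> Vec<String>;
    fn unlock_account(&self, account: &str, pass: &str) -> bool;
}

pub struct InMemoryAccounts {
    store: RwLock<HashMap<String, String>>, // address -> password (toy data)
}

impl AccountProviderLike for InMemoryAccounts {
    fn accounts(&self) -> Vec<String> {
        self.store.read().unwrap().keys().cloned().collect()
    }
    fn unlock_account(&self, account: &str, pass: &str) -> bool {
        self.store.read().unwrap().get(account).map_or(false, |p| p == pass)
    }
}

fn main() {
    let mut m = HashMap::new();
    m.insert("0xabc".to_owned(), "hunter2".to_owned());
    let svc = InMemoryAccounts { store: RwLock::new(m) };
    println!("{:?} unlocked: {}", svc.accounts(), svc.unlock_account("0xabc", "hunter2"));
}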

@@ -21,9 +21,10 @@
 pub mod traits;
 mod impls;
 mod types;
+mod helpers;

 #[cfg(test)]
 mod tests;
-mod helpers;

 pub use self::traits::{Web3, Eth, EthFilter, Personal, Net};
 pub use self::impls::*;

@@ -14,12 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-extern crate rustc_version;
-
-use rustc_version::{version_meta, Channel};
-
-fn main() {
-	if let Channel::Nightly = version_meta().channel {
-		println!("cargo:rustc-cfg=nightly");
-	}
-}
+mod sync_provider;
+
+pub use self::sync_provider::{Config, TestSyncProvider};

rpc/src/v1/tests/helpers/sync_provider.rs (new file, 58 lines)
@@ -0,0 +1,58 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use ethcore::transaction::SignedTransaction;
+use ethsync::{SyncProvider, SyncStatus, SyncState};
+
+pub struct Config {
+	pub protocol_version: u8,
+	pub num_peers: usize,
+}
+
+pub struct TestSyncProvider {
+	status: SyncStatus,
+}
+
+impl TestSyncProvider {
+	pub fn new(config: Config) -> Self {
+		TestSyncProvider {
+			status: SyncStatus {
+				state: SyncState::NotSynced,
+				protocol_version: config.protocol_version,
+				start_block_number: 0,
+				last_imported_block_number: None,
+				highest_block_number: None,
+				blocks_total: 0,
+				blocks_received: 0,
+				num_peers: config.num_peers,
+				num_active_peers: 0,
+				mem_used: 0,
+				transaction_queue_pending: 0,
+			},
+		}
+	}
+}
+
+impl SyncProvider for TestSyncProvider {
+	fn status(&self) -> SyncStatus {
+		self.status.clone()
+	}
+
+	fn insert_transaction(&self, _transaction: SignedTransaction) {
+		unimplemented!()
+	}
+}

@@ -1 +1,20 @@
-//TODO: load custom blockchain state and test
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//!TODO: load custom blockchain state and test
+
+mod net;
+mod helpers;

rpc/src/v1/tests/net.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+use jsonrpc_core::IoHandler;
+use v1::{Net, NetClient};
+use v1::tests::helpers::{Config, TestSyncProvider};
+
+fn sync_provider() -> Arc<TestSyncProvider> {
+	Arc::new(TestSyncProvider::new(Config {
+		protocol_version: 65,
+		num_peers: 120,
+	}))
+}
+
+#[test]
+fn rpc_net_version() {
+	let sync = sync_provider();
+	let net = NetClient::new(&sync).to_delegate();
+	let io = IoHandler::new();
+	io.add_delegate(net);
+
+	let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#;
+	let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#;
+
+	assert_eq!(io.handle_request(request), Some(response.to_string()));
+}
+
+#[test]
+fn rpc_net_peer_count() {
+	let sync = sync_provider();
+	let net = NetClient::new(&sync).to_delegate();
+	let io = IoHandler::new();
+	io.add_delegate(net);
+
+	let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#;
+	let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#;
+
+	assert_eq!(io.handle_request(request), Some(response.to_string()));
+}
+
+#[test]
+fn rpc_net_listening() {
+	let sync = sync_provider();
+	let net = NetClient::new(&sync).to_delegate();
+	let io = IoHandler::new();
+	io.add_delegate(net);
+
+	let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#;
+	let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
+
+	assert_eq!(io.handle_request(request), Some(response.to_string()));
+}

@@ -55,12 +55,15 @@ pub trait Eth: Sized + Send + Sync + 'static {

 	/// Returns block with given number.
 	fn block_by_number(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

 	/// Returns the number of transactions sent from given address at given time (block number).
 	fn transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

-	/// Returns the number of transactions in a block.
-	fn block_transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
+	/// Returns the number of transactions in a block given block hash.
+	fn block_transaction_count_by_hash(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
+
+	/// Returns the number of transactions in a block given block number.
+	fn block_transaction_count_by_number(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

 	/// Returns the number of uncles in a given block.
 	fn block_uncles_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }

@@ -130,8 +133,8 @@ pub trait Eth: Sized + Send + Sync + 'static {
 		delegate.add_method("eth_balance", Eth::balance);
 		delegate.add_method("eth_getStorageAt", Eth::storage_at);
 		delegate.add_method("eth_getTransactionCount", Eth::transaction_count);
-		delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count);
-		delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count);
+		delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash);
+		delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number);
 		delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count);
 		delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count);
 		delegate.add_method("eth_code", Eth::code_at);

@@ -15,7 +15,9 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use rustc_serialize::hex::ToHex;
-use serde::{Serialize, Serializer};
+use serde::{Serialize, Serializer, Deserialize, Deserializer, Error};
+use serde::de::Visitor;
+use util::common::FromHex;

 /// Wrapper structure around vector of bytes.
 #[derive(Debug)]

@@ -26,6 +28,7 @@ impl Bytes {
 	pub fn new(bytes: Vec<u8>) -> Bytes {
 		Bytes(bytes)
 	}
+	pub fn to_vec(self) -> Vec<u8> { let Bytes(x) = self; x }
 }

 impl Default for Bytes {

@@ -36,7 +39,7 @@ impl Default for Bytes {
 }

 impl Serialize for Bytes {
 	fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
 	where S: Serializer {
 		let mut serialized = "0x".to_owned();
 		serialized.push_str(self.0.to_hex().as_ref());

@@ -44,6 +47,32 @@ impl Serialize for Bytes {
 	}
 }

+impl Deserialize for Bytes {
+	fn deserialize<D>(deserializer: &mut D) -> Result<Bytes, D::Error>
+	where D: Deserializer {
+		deserializer.deserialize(BytesVisitor)
+	}
+}
+
+struct BytesVisitor;
+
+impl Visitor for BytesVisitor {
+	type Value = Bytes;
+
+	fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
+		if value.len() >= 2 && &value[0..2] == "0x" {
+			Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![])))
+		} else {
+			Err(Error::custom("invalid hex"))
+		}
+	}
+
+	fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: Error {
+		self.visit_str(value.as_ref())
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
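Illustration (not part of the diff): the new Deserialize impl for Bytes accepts only 0x-prefixed hex strings. A standalone sketch of that decoding using std alone; note the behaviour differs slightly, since the real visitor silently maps malformed hex to an empty vector while this sketch reports an error.

fn decode_hex_bytes(value: &str) -> Result<Vec<u8>, String> {
    // Require the 0x prefix, as the visitor does.
    let digits = value.strip_prefix("0x").ok_or_else(|| "invalid hex".to_owned())?;
    if digits.len() % 2 != 0 {
        return Err("odd number of hex digits".to_owned());
    }
    (0..digits.len())
        .step_by(2)
        .map(|i| u8::from_str_radix(&digits[i..i + 2], 16).map_err(|e| e.to_string()))
        .collect()
}

fn main() {
    assert_eq!(decode_hex_bytes("0x0123").unwrap(), vec![0x01, 0x23]);
    assert!(decode_hex_bytes("1234").is_err()); // missing 0x prefix is rejected
    println!("{:?}", decode_hex_bytes("0xdeadbeef").unwrap());
}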

@@ -33,3 +33,5 @@ pub use self::log::Log;
 pub use self::optionals::OptionalValue;
 pub use self::sync::{SyncStatus, SyncInfo};
 pub use self::transaction::Transaction;
+pub use self::transaction::TransactionRequest;

@@ -17,6 +17,8 @@
 use util::numbers::*;
 use ethcore::transaction::{LocalizedTransaction, Action};
 use v1::types::{Bytes, OptionalValue};
+use serde::{Deserializer, Error};
+use ethcore;

 #[derive(Debug, Default, Serialize)]
 pub struct Transaction {

@@ -37,6 +39,35 @@ pub struct Transaction {
 	pub input: Bytes
 }

+#[derive(Debug, Default, Serialize, Deserialize)]
+pub struct TransactionRequest {
+	pub from: Address,
+	pub to: Option<Address>,
+	#[serde(rename="gasPrice")]
+	pub gas_price: Option<U256>,
+	pub gas: Option<U256>,
+	pub value: Option<U256>,
+	pub data: Bytes,
+	pub nonce: Option<U256>,
+}
+
+impl TransactionRequest {
+	/// maps transaction request to the transaction that can be signed and inserted
+	pub fn to_eth(self) -> (ethcore::transaction::Transaction, Address) {
+		(ethcore::transaction::Transaction {
+			nonce: self.nonce.unwrap_or(U256::zero()),
+			action: match self.to {
+				None => ethcore::transaction::Action::Create,
+				Some(addr) => ethcore::transaction::Action::Call(addr)
+			},
+			gas: self.gas.unwrap_or(U256::zero()),
+			gas_price: self.gas_price.unwrap_or(U256::zero()),
+			value: self.value.unwrap_or(U256::zero()),
+			data: self.data.to_vec()
+		}, self.from)
+	}
+}
+
 impl From<LocalizedTransaction> for Transaction {
 	fn from(t: LocalizedTransaction) -> Transaction {
 		Transaction {
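Illustration (not part of the diff): TransactionRequest::to_eth defaults every optional field to zero and treats a missing "to" address as contract creation. A toy version of that mapping with stand-in types:

#[derive(Default)]
struct TxRequest { to: Option<String>, gas: Option<u64>, gas_price: Option<u64>, value: Option<u64>, nonce: Option<u64>, data: Vec<u8> }

enum Action { Create, Call(String) }
struct Tx { nonce: u64, action: Action, gas: u64, gas_price: u64, value: u64, data: Vec<u8> }

fn to_eth(req: TxRequest) -> Tx {
    Tx {
        nonce: req.nonce.unwrap_or(0),
        // No recipient means contract creation, otherwise a plain call.
        action: match req.to { None => Action::Create, Some(addr) => Action::Call(addr) },
        gas: req.gas.unwrap_or(0),
        gas_price: req.gas_price.unwrap_or(0),
        value: req.value.unwrap_or(0),
        data: req.data,
    }
}

fn main() {
    let tx = to_eth(TxRequest { value: Some(10), ..Default::default() });
    match tx.action {
        Action::Create => println!("contract creation, value {} gas {}", tx.value, tx.gas),
        Action::Call(addr) => println!("call to {}", addr),
    }
}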

@@ -4,13 +4,9 @@ name = "ethsync"
 version = "0.9.99"
 license = "GPL-3.0"
 authors = ["Ethcore <admin@ethcore.io"]
-build = "build.rs"

 [lib]

-[build-dependencies]
-rustc_version = "0.1"
-
 [dependencies]
 ethcore-util = { path = "../util" }
 ethcore = { path = "../ethcore" }

@@ -24,4 +20,4 @@ clippy = { version = "0.0.44", optional = true }

 [features]
 default = []
-dev = ["ethcore/dev", "ethcore-util/dev", "ethminer/dev"]
+dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethminer/dev"]

@@ -118,6 +118,7 @@ pub enum SyncState {
 }

 /// Syncing status and statistics
+#[derive(Clone)]
 pub struct SyncStatus {
 	/// State
 	pub state: SyncState,

@@ -273,7 +274,7 @@ impl ChainSync {
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()`
+	#[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()`
 	/// Rest sync. Clear all downloaded data but keep the queue
 	fn reset(&mut self) {
 		self.downloading_headers.clear();

@@ -340,7 +341,7 @@ impl ChainSync {
 		Ok(())
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 	/// Called by peer once it has new block headers during sync
 	fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
 		self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders);

@@ -467,7 +468,7 @@ impl ChainSync {
 	}

 	/// Called by peer once it has new block bodies
-	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 	fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
 		let block_rlp = try!(r.at(0));
 		let header_rlp = try!(block_rlp.at(0));

@@ -15,11 +15,11 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 #![warn(missing_docs)]
-#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
-#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
+#![cfg_attr(feature="dev", feature(plugin))]
+#![cfg_attr(feature="dev", plugin(clippy))]

 // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
-#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]
+#![cfg_attr(feature="dev", allow(clone_on_copy))]

 //! Blockchain sync module
 //! Implements ethereum protocol version 63 as specified here:

@@ -42,7 +42,7 @@ pub trait RangeCollection<K, V> {
 	fn remove_head(&mut self, start: &K);
 	/// Remove all elements >= `start` in the range that contains `start`
 	fn remove_tail(&mut self, start: &K);
 	/// Remove all elements >= `start`
 	fn remove_from(&mut self, start: &K);
 	/// Remove all elements >= `tail`
 	fn insert_item(&mut self, key: K, value: V);

@@ -231,7 +231,7 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq +
 }

 #[test]
-#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 fn test_range() {
 	use std::cmp::{Ordering};

@@ -15,7 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use util::*;
-use ethcore::client::{BlockChainClient, BlockId};
+use ethcore::client::{BlockChainClient, BlockId, EachBlockWith};
 use io::SyncIo;
 use chain::{SyncState};
 use super::helpers::*;

@@ -15,310 +15,11 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use util::*;
-use ::SyncConfig;
-use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId, BlockQueueInfo};
-use ethcore::header::{Header as BlockHeader, BlockNumber};
-use ethcore::error::*;
-use ethcore::receipt::Receipt;
-use ethcore::transaction::{LocalizedTransaction, SignedTransaction, Transaction, Action};
-use ethcore::filter::Filter;
-use ethcore::log_entry::LocalizedLogEntry;
-use ethcore::block::{ClosedBlock, SealedBlock};
-use ethminer::Miner;
+use ethcore::client::{TestBlockChainClient, BlockChainClient};
 use io::SyncIo;
 use chain::ChainSync;
+use ethminer::Miner;
+use ::SyncConfig;

-pub struct TestBlockChainClient {
-	pub blocks: RwLock<HashMap<H256, Bytes>>,
-	pub numbers: RwLock<HashMap<usize, H256>>,
-	pub genesis_hash: H256,
-	pub last_hash: RwLock<H256>,
-	pub difficulty: RwLock<U256>,
-}
-
-#[derive(Clone)]
-pub enum EachBlockWith {
-	Nothing,
-	Uncle,
-	Transaction,
-	UncleAndTransaction
-}
-
-impl TestBlockChainClient {
-	pub fn new() -> TestBlockChainClient {
-		let mut client = TestBlockChainClient {
-			blocks: RwLock::new(HashMap::new()),
-			numbers: RwLock::new(HashMap::new()),
-			genesis_hash: H256::new(),
-			last_hash: RwLock::new(H256::new()),
-			difficulty: RwLock::new(From::from(0)),
-		};
-		client.add_blocks(1, EachBlockWith::Nothing); // add genesis block
-		client.genesis_hash = client.last_hash.read().unwrap().clone();
-		client
-	}
-
-	pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) {
-		let len = self.numbers.read().unwrap().len();
-		for n in len..(len + count) {
-			let mut header = BlockHeader::new();
-			header.difficulty = From::from(n);
-			header.parent_hash = self.last_hash.read().unwrap().clone();
-			header.number = n as BlockNumber;
-			let uncles = match with {
-				EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => {
-					let mut uncles = RlpStream::new_list(1);
-					let mut uncle_header = BlockHeader::new();
-					uncle_header.difficulty = From::from(n);
-					uncle_header.parent_hash = self.last_hash.read().unwrap().clone();
-					uncle_header.number = n as BlockNumber;
-					uncles.append(&uncle_header);
-					header.uncles_hash = uncles.as_raw().sha3();
-					uncles
-				},
-				_ => RlpStream::new_list(0)
-			};
-			let txs = match with {
-				EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => {
-					let mut txs = RlpStream::new_list(1);
-					let keypair = KeyPair::create().unwrap();
-					let tx = Transaction {
-						action: Action::Create,
-						value: U256::from(100),
-						data: "3331600055".from_hex().unwrap(),
-						gas: U256::from(100_000),
-						gas_price: U256::one(),
-						nonce: U256::zero()
-					};
-					let signed_tx = tx.sign(&keypair.secret());
-					txs.append(&signed_tx);
-					txs.out()
-				},
-				_ => rlp::NULL_RLP.to_vec()
-			};
-
-			let mut rlp = RlpStream::new_list(3);
-			rlp.append(&header);
-			rlp.append_raw(&txs, 1);
-			rlp.append_raw(uncles.as_raw(), 1);
-			self.import_block(rlp.as_raw().to_vec()).unwrap();
-		}
-	}
-
-	pub fn corrupt_block(&mut self, n: BlockNumber) {
-		let hash = self.block_hash(BlockId::Number(n)).unwrap();
-		let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap());
-		header.parent_hash = H256::new();
-		let mut rlp = RlpStream::new_list(3);
-		rlp.append(&header);
-		rlp.append_raw(&rlp::NULL_RLP, 1);
-		rlp.append_raw(&rlp::NULL_RLP, 1);
-		self.blocks.write().unwrap().insert(hash, rlp.out());
-	}
-
-	pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 {
-		let blocks_read = self.numbers.read().unwrap();
-		let index = blocks_read.len() - delta;
-		blocks_read[&index].clone()
-	}
-
-	fn block_hash(&self, id: BlockId) -> Option<H256> {
-		match id {
-			BlockId::Hash(hash) => Some(hash),
-			BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(),
-			BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(),
-			BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned()
-		}
-	}
-}
-
-impl BlockChainClient for TestBlockChainClient {
-	fn block_total_difficulty(&self, _id: BlockId) -> Option<U256> {
-		Some(U256::zero())
-	}
-
-	fn block_hash(&self, _id: BlockId) -> Option<H256> {
-		unimplemented!();
-	}
-
-	fn nonce(&self, _address: &Address) -> U256 {
-		U256::zero()
-	}
-
-	fn code(&self, _address: &Address) -> Option<Bytes> {
-		unimplemented!();
-	}
-
-	fn transaction(&self, _id: TransactionId) -> Option<LocalizedTransaction> {
-		unimplemented!();
-	}
-
-	fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option<Vec<BlockNumber>> {
-		unimplemented!();
-	}
-
-	fn logs(&self, _filter: Filter) -> Vec<LocalizedLogEntry> {
-		unimplemented!();
-	}
-
-	fn block_header(&self, id: BlockId) -> Option<Bytes> {
-		self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
-	}
-
-	fn block_body(&self, id: BlockId) -> Option<Bytes> {
-		self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| {
-			let mut stream = RlpStream::new_list(2);
-			stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1);
-			stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1);
-			stream.out()
-		}))
-	}
-
-	fn block(&self, id: BlockId) -> Option<Bytes> {
-		self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned())
-	}
-
-	fn block_status(&self, id: BlockId) -> BlockStatus {
-		match id {
-			BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain,
-			BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain,
-			_ => BlockStatus::Unknown
-		}
-	}
-
-	// works only if blocks are one after another 1 -> 2 -> 3
-	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
-		Some(TreeRoute {
-			ancestor: H256::new(),
-			index: 0,
-			blocks: {
-				let numbers_read = self.numbers.read().unwrap();
-				let mut adding = false;
-
-				let mut blocks = Vec::new();
-				for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) {
-					if hash == to {
-						if adding {
-							blocks.push(hash.clone());
-						}
-						adding = false;
-						break;
-					}
-					if hash == from {
-						adding = true;
-					}
-					if adding {
-						blocks.push(hash.clone());
-					}
-				}
-				if adding { Vec::new() } else { blocks }
-			}
-		})
-	}
-
-	// TODO: returns just hashes instead of node state rlp(?)
-	fn state_data(&self, hash: &H256) -> Option<Bytes> {
-		// starts with 'f' ?
-		if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
-			let mut rlp = RlpStream::new();
-			rlp.append(&hash.clone());
-			return Some(rlp.out());
-		}
-		None
-	}
-
-	fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
-		// starts with 'f' ?
-		if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
-			let receipt = Receipt::new(
-				H256::zero(),
-				U256::zero(),
-				vec![]);
-			let mut rlp = RlpStream::new();
-			rlp.append(&receipt);
-			return Some(rlp.out());
-		}
-		None
-	}
-
-	fn import_block(&self, b: Bytes) -> ImportResult {
-		let header = Rlp::new(&b).val_at::<BlockHeader>(0);
-		let h = header.hash();
-		let number: usize = header.number as usize;
-		if number > self.blocks.read().unwrap().len() {
-			panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number);
-		}
-		if number > 0 {
-			match self.blocks.read().unwrap().get(&header.parent_hash) {
-				Some(parent) => {
-					let parent = Rlp::new(parent).val_at::<BlockHeader>(0);
-					if parent.number != (header.number - 1) {
-						panic!("Unexpected block parent");
-					}
-				},
-				None => {
-					panic!("Unknown block parent {:?} for block {}", header.parent_hash, number);
-				}
-			}
-		}
-		let len = self.numbers.read().unwrap().len();
-		if number == len {
-			{
-				let mut difficulty = self.difficulty.write().unwrap();
-				*difficulty.deref_mut() = *difficulty.deref() + header.difficulty;
-			}
-			mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone());
-			self.blocks.write().unwrap().insert(h.clone(), b);
-			self.numbers.write().unwrap().insert(number, h.clone());
-			let mut parent_hash = header.parent_hash;
-			if number > 0 {
-				let mut n = number - 1;
-				while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash {
-					*self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone();
-					n -= 1;
-					parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::<BlockHeader>(0).parent_hash;
-				}
-			}
-		}
-		else {
-			self.blocks.write().unwrap().insert(h.clone(), b.to_vec());
-		}
-		Ok(h)
-	}
-
-	fn queue_info(&self) -> BlockQueueInfo {
-		BlockQueueInfo {
-			verified_queue_size: 0,
-			unverified_queue_size: 0,
-			verifying_queue_size: 0,
-			max_queue_size: 0,
-			max_mem_use: 0,
-			mem_used: 0,
-		}
-	}
-
-	fn clear_queue(&self) {
-	}
-
-	fn chain_info(&self) -> BlockChainInfo {
-		BlockChainInfo {
-			total_difficulty: *self.difficulty.read().unwrap(),
-			pending_total_difficulty: *self.difficulty.read().unwrap(),
-			genesis_hash: self.genesis_hash.clone(),
-			best_block_hash: self.last_hash.read().unwrap().clone(),
-			best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1,
-		}
-	}
-
-	fn prepare_sealing(&self, _author: Address, _extra_data: Bytes, _transactions: Vec<SignedTransaction>) -> Option<ClosedBlock> {
-		unimplemented!()
-	}
-
-	fn try_seal(&self, _block: ClosedBlock, _seal: Vec<Bytes>) -> Result<SealedBlock, ClosedBlock> {
-		unimplemented!()
-	}
-}
-
 pub struct TestIo<'p> {
 	pub chain: &'p mut TestBlockChainClient,
|
@ -40,8 +40,7 @@ chrono = "0.2"
|
|||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = []
|
default = []
|
||||||
dev = []
|
dev = ["clippy"]
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
vergen = "*"
|
vergen = "*"
|
||||||
rustc_version = "0.1"
|
|
||||||
|
@ -1103,7 +1103,7 @@ macro_rules! construct_uint {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok.
|
#[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok.
|
||||||
impl Hash for $name {
|
impl Hash for $name {
|
||||||
fn hash<H>(&self, state: &mut H) where H: Hasher {
|
fn hash<H>(&self, state: &mut H) where H: Hasher {
|
||||||
unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); }
|
unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); }
|
||||||
@ -1485,7 +1485,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))]
|
#[cfg_attr(feature="dev", allow(eq_op))]
|
||||||
pub fn uint256_comp_test() {
|
pub fn uint256_comp_test() {
|
||||||
let small = U256([10u64, 0, 0, 0]);
|
let small = U256([10u64, 0, 0, 0]);
|
||||||
let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]);
|
let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]);
|
||||||
@ -2032,7 +2032,7 @@ mod tests {
|
|||||||
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
|
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
|
||||||
fn u256_multi_full_mul() {
|
fn u256_multi_full_mul() {
|
||||||
let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0]));
|
let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0]));
|
||||||
assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result);
|
assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result);
|
||||||
|
@ -14,15 +14,10 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
extern crate rustc_version;
|
|
||||||
extern crate vergen;
|
extern crate vergen;
|
||||||
|
|
||||||
use vergen::*;
|
use vergen::*;
|
||||||
use rustc_version::{version_meta, Channel};
|
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
vergen(OutputFns::all()).unwrap();
|
vergen(OutputFns::all()).unwrap();
|
||||||
if let Channel::Nightly = version_meta().channel {
|
|
||||||
println!("cargo:rustc-cfg=nightly");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
@ -305,7 +305,7 @@ macro_rules! impl_hash {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Copy for $from {}
|
impl Copy for $from {}
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))]
|
#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
|
||||||
impl Clone for $from {
|
impl Clone for $from {
|
||||||
fn clone(&self) -> $from {
|
fn clone(&self) -> $from {
|
||||||
unsafe {
|
unsafe {
|
||||||
@ -637,7 +637,7 @@ mod tests {
|
|||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))]
|
#[cfg_attr(feature="dev", allow(eq_op))]
|
||||||
fn hash() {
|
fn hash() {
|
||||||
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
|
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
|
||||||
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
|
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
|
||||||
|
@ -78,6 +78,59 @@ struct AccountUnlock {
|
|||||||
expires: DateTime<UTC>,
|
expires: DateTime<UTC>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Basic account management trait
|
||||||
|
pub trait AccountProvider : Send + Sync {
|
||||||
|
/// Lists all accounts
|
||||||
|
fn accounts(&self) -> Result<Vec<Address>, ::std::io::Error>;
|
||||||
|
/// Unlocks account with the password provided
|
||||||
|
fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError>;
|
||||||
|
/// Creates account
|
||||||
|
fn new_account(&self, pass: &str) -> Result<Address, ::std::io::Error>;
|
||||||
|
/// Returns secret for unlocked account
|
||||||
|
fn account_secret(&self, account: &Address) -> Result<crypto::Secret, SigningError>;
|
||||||
|
/// Returns secret for unlocked account
|
||||||
|
fn sign(&self, account: &Address, message: &H256) -> Result<crypto::Signature, SigningError>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Thread-safe accounts management
|
||||||
|
pub struct AccountService {
|
||||||
|
secret_store: RwLock<SecretStore>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AccountProvider for AccountService {
|
||||||
|
/// Lists all accounts
|
||||||
|
fn accounts(&self) -> Result<Vec<Address>, ::std::io::Error> {
|
||||||
|
Ok(try!(self.secret_store.read().unwrap().accounts()).iter().map(|&(addr, _)| addr).collect::<Vec<Address>>())
|
||||||
|
}
|
||||||
|
/// Unlocks account with the password provided
|
||||||
|
fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> {
|
||||||
|
self.secret_store.read().unwrap().unlock_account(account, pass)
|
||||||
|
}
|
||||||
|
/// Creates account
|
||||||
|
fn new_account(&self, pass: &str) -> Result<Address, ::std::io::Error> {
|
||||||
|
self.secret_store.write().unwrap().new_account(pass)
|
||||||
|
}
|
||||||
|
/// Returns secret for unlocked account
|
||||||
|
fn account_secret(&self, account: &Address) -> Result<crypto::Secret, SigningError> {
|
||||||
|
self.secret_store.read().unwrap().account_secret(account)
|
||||||
|
}
|
||||||
|
/// Returns secret for unlocked account
|
||||||
|
fn sign(&self, account: &Address, message: &H256) -> Result<crypto::Signature, SigningError> {
|
||||||
|
self.secret_store.read().unwrap().sign(account, message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AccountService {
|
||||||
|
/// New account service with the default location
|
||||||
|
pub fn new() -> AccountService {
|
||||||
|
let secret_store = RwLock::new(SecretStore::new());
|
||||||
|
secret_store.write().unwrap().try_import_existing();
|
||||||
|
AccountService {
|
||||||
|
secret_store: secret_store
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl SecretStore {
|
impl SecretStore {
|
||||||
/// new instance of Secret Store in default home directory
|
/// new instance of Secret Store in default home directory
|
||||||
pub fn new() -> SecretStore {
|
pub fn new() -> SecretStore {
|
||||||
|
@@ -15,18 +15,18 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 #![warn(missing_docs)]
-#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
-#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
+#![cfg_attr(feature="dev", feature(plugin))]
+#![cfg_attr(feature="dev", plugin(clippy))]

 // Clippy settings
 // TODO [todr] not really sure
-#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
+#![cfg_attr(feature="dev", allow(needless_range_loop))]
 // Shorter than if-else
-#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
+#![cfg_attr(feature="dev", allow(match_bool))]
 // We use that to be more explicit about handled cases
-#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))]
+#![cfg_attr(feature="dev", allow(match_same_arms))]
 // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
-#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]
+#![cfg_attr(feature="dev", allow(clone_on_copy))]

 //! Ethcore-util library
 //!
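The remaining hunks repeat the same simplification: the extra nightly condition is dropped, leaving the dev feature as the only gate for the clippy attributes. A minimal sketch of the resulting pattern, assuming a crate that declares an optional dev feature enabling the clippy plugin and is built with cargo build --features dev on a nightly toolchain:

// Crate-level attributes: only active when compiled with `--features dev`,
// so stable builds never see the unstable plugin machinery.
#![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))]

// Item-level attribute: silences a single clippy lint, again only under `dev`.
#[cfg_attr(feature="dev", allow(match_bool))]
fn describe(flag: bool) -> &'static str {
    match flag {
        true => "enabled",
        false => "disabled",
    }
}

fn main() {
    assert_eq!(describe(true), "enabled");
}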
@@ -243,7 +243,7 @@ impl Discovery {
 		self.send_to(packet, address.clone());
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(map_clone))]
+	#[cfg_attr(feature="dev", allow(map_clone))]
 	fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec<NodeEntry> {
 		let mut found: BTreeMap<u32, Vec<&NodeEntry>> = BTreeMap::new();
 		let mut count = 0;
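For orientation, here is a simplified, self-contained sketch of the grouping-by-distance idea visible in nearest_node_entries; hypothetical u32 node ids, XOR distance, and an assumed bucket size of 16 stand in for the real NodeId/NodeBucket/NodeEntry types.

use std::collections::BTreeMap;

// Hypothetical stand-in: the real implementation uses 512-bit node ids.
type NodeId = u32;

const BUCKET_SIZE: usize = 16; // assumed table bucket size

fn nearest_node_entries(target: &NodeId, nodes: &[NodeId]) -> Vec<NodeId> {
    // Group candidates by XOR distance; BTreeMap iteration is ordered by key,
    // so the closest distance groups come out first.
    let mut found: BTreeMap<u32, Vec<NodeId>> = BTreeMap::new();
    for node in nodes {
        found.entry(node ^ target).or_insert_with(Vec::new).push(*node);
    }
    found.into_iter()
        .flat_map(|(_, group)| group)
        .take(BUCKET_SIZE)
        .collect()
}

fn main() {
    let nodes = [1u32, 7, 2, 12, 3];
    let nearest = nearest_node_entries(&3, &nodes);
    assert_eq!(nearest[0], 3); // distance 0 comes first
}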
@@ -507,7 +507,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count());
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
+	#[cfg_attr(feature="dev", allow(single_match))]
 	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
 		if self.have_session(id)
 		{
@@ -542,7 +542,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		self.create_connection(socket, Some(id), io);
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))]
+	#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
 	fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) {
 		let nonce = self.info.write().unwrap().next_nonce();
 		let mut handshakes = self.handshakes.write().unwrap();
@@ -71,7 +71,7 @@ impl PanicHandler {

 	/// Invoke closure and catch any possible panics.
 	/// In case of panic notifies all listeners about it.
-	#[cfg_attr(all(nightly, feature="dev"), allow(deprecated))]
+	#[cfg_attr(feature="dev", allow(deprecated))]
 	pub fn catch_panic<G, R>(&self, g: G) -> thread::Result<R> where G: FnOnce() -> R + Send + 'static {
 		let _guard = PanicGuard { handler: self };
 		let result = g();
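catch_panic relies on a guard object whose Drop impl runs even while a panic unwinds. The following self-contained sketch shows that guard pattern; the listener notification and the helper-thread catch are illustrative assumptions, not the ethcore-util implementation, which likely calls a then-deprecated catch API given the allow(deprecated) attribute above.

use std::thread;

// Guard whose destructor runs on both the normal and the panicking path.
struct PanicGuard;

impl Drop for PanicGuard {
    fn drop(&mut self) {
        // thread::panicking() distinguishes unwinding from normal return.
        if thread::panicking() {
            println!("panic detected, notifying listeners");
        }
    }
}

// Run the closure on a helper thread so the panic can be observed via join().
fn catch_panic<G, R>(g: G) -> thread::Result<R>
    where G: FnOnce() -> R + Send + 'static,
          R: Send + 'static
{
    thread::spawn(move || {
        let _guard = PanicGuard;
        g()
    }).join()
}

fn main() {
    assert!(catch_panic(|| 40 + 2).is_ok());
    assert!(catch_panic(|| -> () { panic!("boom") }).is_err());
}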
@@ -54,7 +54,7 @@ pub struct TrieDB<'db> {
 	pub hash_count: usize,
 }

-#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
+#[cfg_attr(feature="dev", allow(wrong_self_convention))]
 impl<'db> TrieDB<'db> {
 	/// Create a new trie with the backing database `db` and `root`
 	/// Panics, if `root` does not exist
@@ -66,7 +66,7 @@ enum MaybeChanged<'a> {
 	Changed(Bytes),
 }

-#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
+#[cfg_attr(feature="dev", allow(wrong_self_convention))]
 impl<'db> TrieDBMut<'db> {
 	/// Create a new trie with the backing database `db` and empty `root`
 	/// Initialise to the state entailed by the genesis block.
@@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> {
 		}
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 	/// Determine the RLP of the node, assuming we're inserting `partial` into the
 	/// node currently of data `old`. This will *not* delete any hash of `old` from the database;
 	/// it will just return the new RLP that includes the new node.