Merge branch 'master' into dapps-content-bug

Gav Wood, 2017-04-07 18:26:55 +02:00 (committed by GitHub)
commit 81f48c0001
66 changed files with 2122 additions and 612 deletions

View File

@@ -14,13 +14,14 @@ cache:
   untracked: true
 linux-stable:
   stage: build
-  image: ethcore/rust:stable
+  image: parity/rust:gitlab-ci
   only:
     - beta
     - tags
     - stable
     - triggers
   script:
+    - rustup default stable
    - cargo build -j $(nproc) --release --features final $CARGOFLAGS
    - cargo build -j $(nproc) --release -p evmbin
    - cargo build -j $(nproc) --release -p ethstore
@@ -105,13 +106,14 @@ linux-stable-debian:
     name: "stable-x86_64-unknown-debian-gnu_parity"
 linux-beta:
   stage: build
-  image: ethcore/rust:beta
+  image: parity/rust:gitlab-ci
   only:
     - beta
     - tags
     - stable
     - triggers
   script:
+    - rustup default beta
    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
   tags:
@@ -124,13 +126,14 @@ linux-beta:
   allow_failure: true
 linux-nightly:
   stage: build
-  image: ethcore/rust:nightly
+  image: parity/rust:gitlab-ci
   only:
     - beta
     - tags
     - stable
     - triggers
   script:
+    - rustup default nightly
    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
   tags:
@@ -544,11 +547,12 @@ test-windows:
   allow_failure: true
 test-rust-stable:
   stage: test
-  image: ethcore/rust:stable
+  image: parity/rust:gitlab-ci
   before_script:
     - git submodule update --init --recursive
     - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l)
   script:
+    - rustup show
    - export RUST_BACKTRACE=1
    - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
   tags:

Cargo.lock (generated)
View File

@@ -27,6 +27,7 @@ dependencies = [
  "ethsync 1.7.0",
  "evmbin 0.1.0",
  "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -533,6 +534,7 @@ name = "ethcore-light"
 version = "1.7.0"
 dependencies = [
  "ethcore 1.7.0",
+ "ethcore-devtools 1.7.0",
  "ethcore-io 1.7.0",
  "ethcore-ipc 1.7.0",
  "ethcore-ipc-codegen 1.7.0",
@@ -1755,7 +1757,7 @@ dependencies = [
 [[package]]
 name = "parity-ui-precompiled"
 version = "1.4.0"
-source = "git+https://github.com/paritytech/js-precompiled.git#04143247380a7a9bce112c9467636684d8214973"
+source = "git+https://github.com/paritytech/js-precompiled.git#606f5bf9966b93fcc695e43d648bb9b9fe178c3f"
 dependencies = [
  "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

View File

@@ -23,6 +23,7 @@ toml = "0.2"
 serde = "0.9"
 serde_json = "0.9"
 app_dirs = "1.1.1"
+futures = "0.1"
 fdlimit = "0.1"
 ws2_32-sys = "0.2"
 ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }

View File

@@ -17,6 +17,7 @@ ethcore-util = { path = "../../util" }
 ethcore-network = { path = "../../util/network" }
 ethcore-io = { path = "../../util/io" }
 ethcore-ipc = { path = "../../ipc/rpc", optional = true }
+ethcore-devtools = { path = "../../devtools" }
 rlp = { path = "../../util/rlp" }
 time = "0.1"
 smallvec = "0.3.1"

View File

@@ -23,9 +23,9 @@
 //! This is separate from the `BlockChain` for two reasons:
 //! - It stores only headers (and a pruned subset of them)
 //! - To allow for flexibility in the database layout once that's incorporated.
-// TODO: use DB instead of memory. DB Layout: just the contents of `candidates`/`headers`
-use std::collections::{BTreeMap, HashMap};
+use std::collections::BTreeMap;
+use std::sync::Arc;

 use cht;
@@ -34,7 +34,10 @@ use ethcore::error::BlockError;
 use ethcore::encoded;
 use ethcore::header::Header;
 use ethcore::ids::BlockId;
-use util::{H256, U256, HeapSizeOf, Mutex, RwLock};
+
+use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp};
+use util::{H256, U256, HeapSizeOf, RwLock};
+use util::kvdb::{DBTransaction, KeyValueDB};
 use smallvec::SmallVec;
@@ -43,6 +46,9 @@ use smallvec::SmallVec;
 /// relevant to any blocks we've got in memory.
 const HISTORY: u64 = 2048;

+/// The best block key. Maps to an RLP list: [best_era, last_era]
+const CURRENT_KEY: &'static [u8] = &*b"best_and_latest";
+
 /// Information about a block.
 #[derive(Debug, Clone)]
 pub struct BlockDescriptor {
@@ -75,42 +81,142 @@ impl HeapSizeOf for Entry {
 	}
 }

+impl Encodable for Entry {
+	fn rlp_append(&self, s: &mut RlpStream) {
+		s.begin_list(self.candidates.len());
+
+		for candidate in &self.candidates {
+			s.begin_list(3)
+				.append(&candidate.hash)
+				.append(&candidate.parent_hash)
+				.append(&candidate.total_difficulty);
+		}
+	}
+}
+
+impl Decodable for Entry {
+	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
+		let mut candidates = SmallVec::<[Candidate; 3]>::new();
+
+		for item in rlp.iter() {
+			candidates.push(Candidate {
+				hash: item.val_at(0)?,
+				parent_hash: item.val_at(1)?,
+				total_difficulty: item.val_at(2)?,
+			})
+		}
+
+		if candidates.is_empty() { return Err(DecoderError::Custom("Empty candidates vector submitted.")) }
+
+		// rely on the invariant that the canonical entry is always first.
+		let canon_hash = candidates[0].hash;
+		Ok(Entry {
+			candidates: candidates,
+			canonical_hash: canon_hash,
+		})
+	}
+}
+
+fn cht_key(number: u64) -> String {
+	format!("{:08x}_canonical", number)
+}
+
+fn era_key(number: u64) -> String {
+	format!("candidates_{}", number)
+}
+
+/// Pending changes from `insert` to be applied after the database write has finished.
+pub struct PendingChanges {
+	best_block: Option<BlockDescriptor>, // new best block.
+}
+
 /// Header chain. See module docs for more details.
 pub struct HeaderChain {
 	genesis_header: encoded::Header, // special-case the genesis.
 	candidates: RwLock<BTreeMap<u64, Entry>>,
-	headers: RwLock<HashMap<H256, encoded::Header>>,
 	best_block: RwLock<BlockDescriptor>,
-	cht_roots: Mutex<Vec<H256>>,
+	db: Arc<KeyValueDB>,
+	col: Option<u32>,
 }

 impl HeaderChain {
-	/// Create a new header chain given this genesis block.
-	pub fn new(genesis: &[u8]) -> Self {
+	/// Create a new header chain given this genesis block and database to read from.
+	pub fn new(db: Arc<KeyValueDB>, col: Option<u32>, genesis: &[u8]) -> Result<Self, String> {
 		use ethcore::views::HeaderView;

-		let g_view = HeaderView::new(genesis);
-
-		HeaderChain {
-			genesis_header: encoded::Header::new(genesis.to_owned()),
-			best_block: RwLock::new(BlockDescriptor {
-				hash: g_view.hash(),
-				number: 0,
-				total_difficulty: g_view.difficulty(),
-			}),
-			candidates: RwLock::new(BTreeMap::new()),
-			headers: RwLock::new(HashMap::new()),
-			cht_roots: Mutex::new(Vec::new()),
-		}
+		let chain = if let Some(current) = db.get(col, CURRENT_KEY)? {
+			let (best_number, highest_number) = {
+				let rlp = Rlp::new(&current);
+				(rlp.val_at(0), rlp.val_at(1))
+			};
+
+			let mut cur_number = highest_number;
+			let mut candidates = BTreeMap::new();
+
+			// load all era entries and referenced headers within them.
+			while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? {
+				let entry: Entry = ::rlp::decode(&entry);
+				trace!(target: "chain", "loaded header chain entry for era {} with {} candidates",
+					cur_number, entry.candidates.len());
+
+				candidates.insert(cur_number, entry);
+
+				cur_number -= 1;
+			}
+
+			// fill best block block descriptor.
+			let best_block = {
+				let era = match candidates.get(&best_number) {
+					Some(era) => era,
+					None => return Err(format!("Database corrupt: highest block referenced but no data.")),
+				};
+				let best = &era.candidates[0];
+
+				BlockDescriptor {
+					hash: best.hash,
+					number: best_number,
+					total_difficulty: best.total_difficulty,
+				}
+			};
+
+			HeaderChain {
+				genesis_header: encoded::Header::new(genesis.to_owned()),
+				best_block: RwLock::new(best_block),
+				candidates: RwLock::new(candidates),
+				db: db,
+				col: col,
+			}
+		} else {
+			let g_view = HeaderView::new(genesis);
+			HeaderChain {
+				genesis_header: encoded::Header::new(genesis.to_owned()),
+				best_block: RwLock::new(BlockDescriptor {
+					hash: g_view.hash(),
+					number: 0,
+					total_difficulty: g_view.difficulty(),
+				}),
+				candidates: RwLock::new(BTreeMap::new()),
+				db: db,
+				col: col,
+			}
+		};
+
+		Ok(chain)
 	}

 	/// Insert a pre-verified header.
 	///
 	/// This blindly trusts that the data given to it is sensible.
-	pub fn insert(&self, header: Header) -> Result<(), BlockError> {
+	/// Returns a set of pending changes to be applied with `apply_pending`
+	/// before the next call to insert and after the transaction has been written.
+	pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result<PendingChanges, BlockError> {
 		let hash = header.hash();
 		let number = header.number();
 		let parent_hash = *header.parent_hash();
+		let mut pending = PendingChanges {
+			best_block: None,
+		};

 		// hold candidates the whole time to guard import order.
 		let mut candidates = self.candidates.write();
@@ -128,20 +234,41 @@ impl HeaderChain {
 		let total_difficulty = parent_td + *header.difficulty();

-		// insert headers and candidates entries.
-		candidates.entry(number).or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash })
-			.candidates.push(Candidate {
+		// insert headers and candidates entries and write era to disk.
+		{
+			let cur_era = candidates.entry(number)
+				.or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash });
+			cur_era.candidates.push(Candidate {
 				hash: hash,
 				parent_hash: parent_hash,
 				total_difficulty: total_difficulty,
 			});

-		let raw = ::rlp::encode(&header).to_vec();
-		self.headers.write().insert(hash, encoded::Header::new(raw));
+			// fix ordering of era before writing.
+			if total_difficulty > cur_era.candidates[0].total_difficulty {
+				let cur_pos = cur_era.candidates.len() - 1;
+				cur_era.candidates.swap(cur_pos, 0);
+				cur_era.canonical_hash = hash;
+			}
+
+			transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era))
+		}
+
+		let raw = ::rlp::encode(&header);
+		transaction.put(self.col, &hash[..], &*raw);
+
+		let (best_num, is_new_best) = {
+			let cur_best = self.best_block.read();
+			if cur_best.total_difficulty < total_difficulty {
+				(number, true)
+			} else {
+				(cur_best.number, false)
+			}
+		};

 		// reorganize ancestors so canonical entries are first in their
 		// respective candidates vectors.
-		if self.best_block.read().total_difficulty < total_difficulty {
+		if is_new_best {
 			let mut canon_hash = hash;
 			for (&height, entry) in candidates.iter_mut().rev().skip_while(|&(height, _)| *height > number) {
 				if height != number && entry.canonical_hash == canon_hash { break; }
@@ -160,23 +287,26 @@ impl HeaderChain {
 				// what about reorgs > cht::SIZE + HISTORY?
 				// resetting to the last block of a given CHT should be possible.
 				canon_hash = entry.candidates[0].parent_hash;
+
+				// write altered era to disk
+				if height != number {
+					let rlp_era = ::rlp::encode(&*entry);
+					transaction.put(self.col, era_key(height).as_bytes(), &rlp_era);
+				}
 			}

 			trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty);
-			*self.best_block.write() = BlockDescriptor {
+			pending.best_block = Some(BlockDescriptor {
 				hash: hash,
 				number: number,
 				total_difficulty: total_difficulty,
-			};
+			});

 			// produce next CHT root if it's time.
 			let earliest_era = *candidates.keys().next().expect("at least one era just created; qed");
 			if earliest_era + HISTORY + cht::SIZE <= number {
 				let cht_num = cht::block_to_cht_number(earliest_era)
 					.expect("fails only for number == 0; genesis never imported; qed");
-				debug_assert_eq!(cht_num as usize, self.cht_roots.lock().len());
-
-				let mut headers = self.headers.write();

 				let cht_root = {
 					let mut i = earliest_era;
@@ -186,10 +316,12 @@ impl HeaderChain {
 					let iter = || {
 						let era_entry = candidates.remove(&i)
 							.expect("all eras are sequential with no gaps; qed");
+						transaction.delete(self.col, era_key(i).as_bytes());
+
 						i += 1;

 						for ancient in &era_entry.candidates {
-							headers.remove(&ancient.hash);
+							transaction.delete(self.col, &ancient.hash);
 						}

 						let canon = &era_entry.candidates[0];
@@ -199,28 +331,56 @@ impl HeaderChain {
 						.expect("fails only when too few items; this is checked; qed")
 				};

+				// write the CHT root to the database.
 				debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root);
-
-				self.cht_roots.lock().push(cht_root);
+				transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root));
 			}
 		}

-		Ok(())
+		// write the best and latest eras to the database.
+		{
+			let latest_num = *candidates.iter().rev().next().expect("at least one era just inserted; qed").0;
+			let mut stream = RlpStream::new_list(2);
+			stream.append(&best_num).append(&latest_num);
+			transaction.put(self.col, CURRENT_KEY, &stream.out())
+		}
+
+		Ok(pending)
+	}
+
+	/// Apply pending changes from a previous `insert` operation.
+	/// Must be done before the next `insert` call.
+	pub fn apply_pending(&self, pending: PendingChanges) {
+		if let Some(best_block) = pending.best_block {
+			*self.best_block.write() = best_block;
+		}
 	}

 	/// Get a block header. In the case of query by number, only canonical blocks
 	/// will be returned.
 	pub fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
+		let load_from_db = |hash: H256| {
+			match self.db.get(self.col, &hash) {
+				Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new),
+				Err(e) => {
+					warn!(target: "chain", "Failed to read from database: {}", e);
+					None
+				}
+			}
+		};
+
 		match id {
 			BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()),
-			BlockId::Hash(hash) => self.headers.read().get(&hash).cloned(),
+			BlockId::Hash(hash) => load_from_db(hash),
 			BlockId::Number(num) => {
 				if self.best_block.read().number < num { return None }
 				self.candidates.read().get(&num).map(|entry| entry.canonical_hash)
-					.and_then(|hash| self.headers.read().get(&hash).cloned())
+					.and_then(load_from_db)
 			}
 			BlockId::Latest | BlockId::Pending => {
+				// hold candidates hear to prevent deletion of the header
+				// as we read it.
+				let _candidates = self.candidates.read();
 				let hash = {
 					let best = self.best_block.read();
 					if best.number == 0 {
@@ -230,7 +390,7 @@ impl HeaderChain {
 					best.hash
 				};

-				self.headers.read().get(&hash).cloned()
+				load_from_db(hash)
 			}
 		}
 	}
@@ -257,7 +417,13 @@ impl HeaderChain {
 	/// This is because it's assumed that the genesis hash is known,
 	/// so including it within a CHT would be redundant.
 	pub fn cht_root(&self, n: usize) -> Option<H256> {
-		self.cht_roots.lock().get(n).map(|h| h.clone())
+		match self.db.get(self.col, cht_key(n as u64).as_bytes()) {
+			Ok(val) => val.map(|x| ::rlp::decode(&x)),
+			Err(e) => {
+				warn!(target: "chain", "Error reading from database: {}", e);
+				None
+			}
+		}
 	}

 	/// Get the genesis hash.
@@ -287,7 +453,7 @@ impl HeaderChain {
 	/// Get block status.
 	pub fn status(&self, hash: &H256) -> BlockStatus {
-		match self.headers.read().contains_key(hash) {
+		match self.db.get(self.col, &*hash).ok().map_or(false, |x| x.is_some()) {
 			true => BlockStatus::InChain,
 			false => BlockStatus::Unknown,
 		}
@@ -296,9 +462,7 @@ impl HeaderChain {
 impl HeapSizeOf for HeaderChain {
 	fn heap_size_of_children(&self) -> usize {
-		self.candidates.read().heap_size_of_children() +
-			self.headers.read().heap_size_of_children() +
-			self.cht_roots.lock().heap_size_of_children()
+		self.candidates.read().heap_size_of_children()
 	}
 }
@@ -324,16 +488,23 @@ impl<'a> Iterator for AncestryIter<'a> {
 #[cfg(test)]
 mod tests {
 	use super::HeaderChain;
+	use std::sync::Arc;
+
 	use ethcore::ids::BlockId;
 	use ethcore::header::Header;
 	use ethcore::spec::Spec;

+	fn make_db() -> Arc<::util::KeyValueDB> {
+		Arc::new(::util::kvdb::in_memory(0))
+	}
+
 	#[test]
 	fn basic_chain() {
 		let spec = Spec::new_test();
 		let genesis_header = spec.genesis_header();
+		let db = make_db();

-		let chain = HeaderChain::new(&::rlp::encode(&genesis_header));
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();

 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -345,7 +516,10 @@ mod tests {
 			header.set_difficulty(*genesis_header.difficulty() * i.into());
 			parent_hash = header.hash();

-			chain.insert(header).unwrap();
+			let mut tx = db.transaction();
+			let pending = chain.insert(&mut tx, header).unwrap();
+			db.write(tx).unwrap();
+			chain.apply_pending(pending);

 			rolling_timestamp += 10;
 		}
@@ -361,7 +535,8 @@ mod tests {
 		let spec = Spec::new_test();
 		let genesis_header = spec.genesis_header();

-		let chain = HeaderChain::new(&::rlp::encode(&genesis_header));
+		let db = make_db();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();

 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -373,7 +548,10 @@ mod tests {
 			header.set_difficulty(*genesis_header.difficulty() * i.into());
 			parent_hash = header.hash();

-			chain.insert(header).unwrap();
+			let mut tx = db.transaction();
+			let pending = chain.insert(&mut tx, header).unwrap();
+			db.write(tx).unwrap();
+			chain.apply_pending(pending);

 			rolling_timestamp += 10;
 		}
@@ -389,7 +567,10 @@ mod tests {
 			header.set_difficulty(*genesis_header.difficulty() * i.into());
 			parent_hash = header.hash();

-			chain.insert(header).unwrap();
+			let mut tx = db.transaction();
+			let pending = chain.insert(&mut tx, header).unwrap();
+			db.write(tx).unwrap();
+			chain.apply_pending(pending);

 			rolling_timestamp += 10;
 		}
@@ -410,7 +591,10 @@ mod tests {
 			header.set_difficulty(*genesis_header.difficulty() * (i * i).into());
 			parent_hash = header.hash();

-			chain.insert(header).unwrap();
+			let mut tx = db.transaction();
+			let pending = chain.insert(&mut tx, header).unwrap();
+			db.write(tx).unwrap();
+			chain.apply_pending(pending);

 			rolling_timestamp += 11;
 		}
@@ -432,11 +616,101 @@ mod tests {
 	fn earliest_is_latest() {
 		let spec = Spec::new_test();
 		let genesis_header = spec.genesis_header();
+		let db = make_db();

-		let chain = HeaderChain::new(&::rlp::encode(&genesis_header));
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();

 		assert!(chain.block_header(BlockId::Earliest).is_some());
 		assert!(chain.block_header(BlockId::Latest).is_some());
 		assert!(chain.block_header(BlockId::Pending).is_some());
 	}
+
+	#[test]
+	fn restore_from_db() {
+		let spec = Spec::new_test();
+		let genesis_header = spec.genesis_header();
+		let db = make_db();
+
+		{
+			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+			let mut parent_hash = genesis_header.hash();
+			let mut rolling_timestamp = genesis_header.timestamp();
+			for i in 1..10000 {
+				let mut header = Header::new();
+				header.set_parent_hash(parent_hash);
+				header.set_number(i);
+				header.set_timestamp(rolling_timestamp);
+				header.set_difficulty(*genesis_header.difficulty() * i.into());
+				parent_hash = header.hash();
+
+				let mut tx = db.transaction();
+				let pending = chain.insert(&mut tx, header).unwrap();
+				db.write(tx).unwrap();
+				chain.apply_pending(pending);
+
+				rolling_timestamp += 10;
+			}
+		}
+
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+		assert!(chain.block_header(BlockId::Number(10)).is_none());
+		assert!(chain.block_header(BlockId::Number(9000)).is_some());
+		assert!(chain.cht_root(2).is_some());
+		assert!(chain.cht_root(3).is_none());
+		assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999);
+	}
+
+	#[test]
+	fn restore_higher_non_canonical() {
+		let spec = Spec::new_test();
+		let genesis_header = spec.genesis_header();
+		let db = make_db();
+
+		{
+			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+			let mut parent_hash = genesis_header.hash();
+			let mut rolling_timestamp = genesis_header.timestamp();
+
+			// push 100 low-difficulty blocks.
+			for i in 1..101 {
+				let mut header = Header::new();
+				header.set_parent_hash(parent_hash);
+				header.set_number(i);
+				header.set_timestamp(rolling_timestamp);
+				header.set_difficulty(*genesis_header.difficulty() * i.into());
+				parent_hash = header.hash();
+
+				let mut tx = db.transaction();
+				let pending = chain.insert(&mut tx, header).unwrap();
+				db.write(tx).unwrap();
+				chain.apply_pending(pending);
+
+				rolling_timestamp += 10;
+			}
+
+			// push fewer high-difficulty blocks.
+			for i in 1..11 {
+				let mut header = Header::new();
+				header.set_parent_hash(parent_hash);
+				header.set_number(i);
+				header.set_timestamp(rolling_timestamp);
+				header.set_difficulty(*genesis_header.difficulty() * i.into() * 1000.into());
+				parent_hash = header.hash();
+
+				let mut tx = db.transaction();
+				let pending = chain.insert(&mut tx, header).unwrap();
+				db.write(tx).unwrap();
+				chain.apply_pending(pending);
+
+				rolling_timestamp += 10;
+			}
+
+			assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
+		}
+
+		// after restoration, non-canonical eras should still be loaded.
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+		assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
+		assert!(chain.candidates.read().get(&100).is_some())
+	}
 }
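The header-chain rewrite above replaces the in-memory `headers` map and `cht_roots` vector with a `KeyValueDB`-backed layout (a `candidates_{era}` entry per height, headers keyed by hash, CHT roots under `cht_key`, and a `best_and_latest` pointer), and it splits every import into stage / write / apply steps. The sketch below restates that write path exactly as the new tests exercise it; the `import_one` wrapper is our own illustrative naming, not part of the commit:

    fn import_one(db: &Arc<::util::KeyValueDB>, chain: &HeaderChain, header: Header) {
        // stage the era, header, and CHT writes in a single transaction...
        let mut tx = db.transaction();
        let pending = chain.insert(&mut tx, header).unwrap();
        // ...persist the transaction...
        db.write(tx).unwrap();
        // ...then publish the new best block to readers.
        chain.apply_pending(pending);
    }

Keeping `apply_pending` separate means the shared best-block descriptor never points at data that has not yet been written to disk.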

View File

@@ -32,6 +32,7 @@ use ethcore::encoded;
 use io::IoChannel;
 use util::{H256, Mutex, RwLock};
+use util::kvdb::{KeyValueDB, CompactionProfile};

 use self::header_chain::{AncestryIter, HeaderChain};
@@ -45,6 +46,14 @@ mod service;
 pub struct Config {
 	/// Verification queue config.
 	pub queue: queue::Config,
+	/// Chain column in database.
+	pub chain_column: Option<u32>,
+	/// Database cache size. `None` => rocksdb default.
+	pub db_cache_size: Option<usize>,
+	/// State db compaction profile
+	pub db_compaction: CompactionProfile,
+	/// Should db have WAL enabled?
+	pub db_wal: bool,
 }

 /// Trait for interacting with the header chain abstractly.
@@ -106,18 +115,30 @@ pub struct Client {
 	chain: HeaderChain,
 	report: RwLock<ClientReport>,
 	import_lock: Mutex<()>,
+	db: Arc<KeyValueDB>,
 }

 impl Client {
 	/// Create a new `Client`.
-	pub fn new(config: Config, spec: &Spec, io_channel: IoChannel<ClientIoMessage>) -> Self {
-		Client {
+	pub fn new(config: Config, db: Arc<KeyValueDB>, chain_col: Option<u32>, spec: &Spec, io_channel: IoChannel<ClientIoMessage>) -> Result<Self, String> {
+		let gh = ::rlp::encode(&spec.genesis_header());
+
+		Ok(Client {
 			queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true),
 			engine: spec.engine.clone(),
-			chain: HeaderChain::new(&::rlp::encode(&spec.genesis_header())),
+			chain: HeaderChain::new(db.clone(), chain_col, &gh)?,
 			report: RwLock::new(ClientReport::default()),
 			import_lock: Mutex::new(()),
-		}
+			db: db,
+		})
+	}
+
+	/// Create a new `Client` backed purely in-memory.
+	/// This will ignore all database options in the configuration.
+	pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel<ClientIoMessage>) -> Self {
+		let db = ::util::kvdb::in_memory(0);
+
+		Client::new(config, Arc::new(db), None, spec, io_channel).expect("New DB creation infallible; qed")
 	}

 	/// Import a header to the queue for additional verification.
@@ -201,15 +222,23 @@ impl Client {
 		for verified_header in self.queue.drain(MAX) {
 			let (num, hash) = (verified_header.number(), verified_header.hash());

-			match self.chain.insert(verified_header) {
-				Ok(()) => {
+			let mut tx = self.db.transaction();
+			let pending = match self.chain.insert(&mut tx, verified_header) {
+				Ok(pending) => {
 					good.push(hash);
 					self.report.write().blocks_imported += 1;
+					pending
 				}
 				Err(e) => {
 					debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e);
 					bad.push(hash);
+					break;
 				}
-			}
+			};
+
+			self.db.write_buffered(tx);
+			self.chain.apply_pending(pending);
+
+			if let Err(e) = self.db.flush() {
+				panic!("Database flush failed: {}. Check disk health and space.", e);
+			}
 		}

View File

@@ -17,33 +17,80 @@
 //! Minimal IO service for light client.
 //! Just handles block import messages and passes them to the client.

+use std::fmt;
+use std::path::Path;
 use std::sync::Arc;

+use ethcore::db;
 use ethcore::service::ClientIoMessage;
 use ethcore::spec::Spec;
 use io::{IoContext, IoError, IoHandler, IoService};
+use util::kvdb::{Database, DatabaseConfig};

 use super::{Client, Config as ClientConfig};

+/// Errors on service initialization.
+#[derive(Debug)]
+pub enum Error {
+	/// Database error.
+	Database(String),
+	/// I/O service error.
+	Io(IoError),
+}
+
+impl fmt::Display for Error {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			Error::Database(ref msg) => write!(f, "Database error: {}", msg),
+			Error::Io(ref err) => write!(f, "I/O service error: {}", err),
+		}
+	}
+}
+
 /// Light client service.
 pub struct Service {
 	client: Arc<Client>,
-	_io_service: IoService<ClientIoMessage>,
+	io_service: IoService<ClientIoMessage>,
 }

 impl Service {
 	/// Start the service: initialize I/O workers and client itself.
-	pub fn start(config: ClientConfig, spec: &Spec) -> Result<Self, IoError> {
-		let io_service = try!(IoService::<ClientIoMessage>::start());
-		let client = Arc::new(Client::new(config, spec, io_service.channel()));
-		try!(io_service.register_handler(Arc::new(ImportBlocks(client.clone()))));
+	pub fn start(config: ClientConfig, spec: &Spec, path: &Path) -> Result<Self, Error> {
+		// initialize database.
+		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
+
+		// give all rocksdb cache to the header chain column.
+		if let Some(size) = config.db_cache_size {
+			db_config.set_cache(db::COL_LIGHT_CHAIN, size);
+		}
+
+		db_config.compaction = config.db_compaction;
+		db_config.wal = config.db_wal;
+
+		let db = Arc::new(Database::open(
+			&db_config,
+			&path.to_str().expect("DB path could not be converted to string.")
+		).map_err(Error::Database)?);
+
+		let io_service = IoService::<ClientIoMessage>::start().map_err(Error::Io)?;
+		let client = Arc::new(Client::new(config,
+			db,
+			db::COL_LIGHT_CHAIN,
+			spec,
+			io_service.channel(),
+		).map_err(Error::Database)?);
+		io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?;
 		Ok(Service {
 			client: client,
-			_io_service: io_service,
+			io_service: io_service,
 		})
 	}

+	/// Register an I/O handler on the service.
+	pub fn register_handler(&self, handler: Arc<IoHandler<ClientIoMessage> + Send>) -> Result<(), IoError> {
+		self.io_service.register_handler(handler)
+	}
+
 	/// Get a handle to the client.
 	pub fn client(&self) -> &Arc<Client> {
 		&self.client
@@ -63,11 +110,13 @@ impl IoHandler<ClientIoMessage> for ImportBlocks {
 #[cfg(test)]
 mod tests {
 	use super::Service;
+	use devtools::RandomTempPath;
 	use ethcore::spec::Spec;

 	#[test]
 	fn it_works() {
 		let spec = Spec::new_test();
-		Service::start(Default::default(), &spec).unwrap();
+		let temp_path = RandomTempPath::new();
+		Service::start(Default::default(), &spec, temp_path.as_path()).unwrap();
 	}
 }

View File

@@ -55,6 +55,7 @@ pub mod remote {
 mod types;

+pub use self::cache::Cache;
 pub use self::provider::Provider;
 pub use self::transaction_queue::TransactionQueue;
 pub use types::request as request;
@@ -76,3 +77,6 @@ extern crate stats;
 #[cfg(feature = "ipc")]
 extern crate ethcore_ipc as ipc;
+
+#[cfg(test)]
+extern crate ethcore_devtools as devtools;

View File

@@ -61,10 +61,12 @@ impl<'a> IoContext for NetworkContext<'a> {
 	}

 	fn disconnect_peer(&self, peer: PeerId) {
+		trace!(target: "pip", "Initiating disconnect of peer {}", peer);
 		NetworkContext::disconnect_peer(self, peer);
 	}

 	fn disable_peer(&self, peer: PeerId) {
+		trace!(target: "pip", "Initiating disable of peer {}", peer);
 		NetworkContext::disable_peer(self, peer);
 	}

View File

@@ -27,7 +27,7 @@ use util::hash::H256;
 use util::{DBValue, Mutex, RwLock, U256};
 use time::{Duration, SteadyTime};

-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::fmt;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
@@ -61,6 +61,9 @@ const TIMEOUT_INTERVAL_MS: u64 = 1000;
 const TICK_TIMEOUT: TimerToken = 1;
 const TICK_TIMEOUT_INTERVAL_MS: u64 = 5000;

+const PROPAGATE_TIMEOUT: TimerToken = 2;
+const PROPAGATE_TIMEOUT_INTERVAL_MS: u64 = 5000;
+
 // minimum interval between updates.
 const UPDATE_INTERVAL_MS: i64 = 5000;
@@ -132,6 +135,7 @@ pub struct Peer {
 	last_update: SteadyTime,
 	pending_requests: RequestSet,
 	failed_requests: Vec<ReqId>,
+	propagated_transactions: HashSet<H256>,
 }

 /// A light protocol event handler.
@@ -303,12 +307,18 @@ impl LightProtocol {
 		match peer.remote_flow {
 			None => Err(Error::NotServer),
 			Some((ref mut creds, ref params)) => {
-				// check that enough credits are available.
-				let mut temp_creds: Credits = creds.clone();
-				for request in requests.requests() {
-					temp_creds.deduct_cost(params.compute_cost(request))?;
+				// apply recharge to credits if there's no pending requests.
+				if peer.pending_requests.is_empty() {
+					params.recharge(creds);
 				}
-				*creds = temp_creds;
+
+				// compute and deduct cost.
+				let pre_creds = creds.current();
+				let cost = params.compute_cost_multi(requests.requests());
+				creds.deduct_cost(cost)?;
+
+				trace!(target: "pip", "requesting from peer {}. Cost: {}; Available: {}",
+					peer_id, cost, pre_creds);

 				let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst));
 				io.send(*peer_id, packet::REQUEST, {
@@ -318,7 +328,7 @@ impl LightProtocol {
 				});

 				// begin timeout.
-				peer.pending_requests.insert(req_id, requests, SteadyTime::now());
+				peer.pending_requests.insert(req_id, requests, cost, SteadyTime::now());
 				Ok(req_id)
 			}
 		}
@@ -401,20 +411,25 @@ impl LightProtocol {
 		let req_id = ReqId(raw.val_at(0)?);
 		let cur_credits: U256 = raw.val_at(1)?;

-		trace!(target: "pip", "pre-verifying response from peer {}", peer);
+		trace!(target: "pip", "pre-verifying response for {} from peer {}", req_id, peer);

 		let peers = self.peers.read();
 		let res = match peers.get(peer) {
 			Some(peer_info) => {
 				let mut peer_info = peer_info.lock();
 				let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now());
+				let cumulative_cost = peer_info.pending_requests.cumulative_cost();
 				let flow_info = peer_info.remote_flow.as_mut();

 				match (req_info, flow_info) {
 					(Some(_), Some(flow_info)) => {
 						let &mut (ref mut c, ref mut flow) = flow_info;
-						let actual_credits = ::std::cmp::min(cur_credits, *flow.limit());
-						c.update_to(actual_credits);
+
+						// only update if the cumulative cost of the request set is zero.
+						if cumulative_cost == 0.into() {
+							let actual_credits = ::std::cmp::min(cur_credits, *flow.limit());
+							c.update_to(actual_credits);
+						}

 						Ok(())
 					}
@@ -488,6 +503,47 @@ impl LightProtocol {
 		}
 	}

+	// propagate transactions to relay peers.
+	// if we aren't on the mainnet, we just propagate to all relay peers
+	fn propagate_transactions(&self, io: &IoContext) {
+		if self.capabilities.read().tx_relay { return }
+
+		let ready_transactions = self.provider.ready_transactions();
+		if ready_transactions.is_empty() { return }
+
+		trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len());
+
+		let all_transaction_hashes: HashSet<_> = ready_transactions.iter().map(|tx| tx.hash()).collect();
+		let mut buf = Vec::new();
+
+		let peers = self.peers.read();
+		for (peer_id, peer_info) in peers.iter() {
+			let mut peer_info = peer_info.lock();
+			if !peer_info.capabilities.tx_relay { continue }
+
+			let prop_filter = &mut peer_info.propagated_transactions;
+			*prop_filter = &*prop_filter & &all_transaction_hashes;
+
+			// fill the buffer with all non-propagated transactions.
+			let to_propagate = ready_transactions.iter()
+				.filter(|tx| prop_filter.insert(tx.hash()))
+				.map(|tx| &tx.transaction);
+
+			buf.extend(to_propagate);
+
+			// propagate to the given peer.
+			if buf.is_empty() { continue }
+			io.send(*peer_id, packet::SEND_TRANSACTIONS, {
+				let mut stream = RlpStream::new_list(buf.len());
+				for pending_tx in buf.drain(..) {
+					stream.append(pending_tx);
+				}
+
+				stream.out()
+			})
+		}
+	}
+
 	/// called when a peer connects.
 	pub fn on_connect(&self, peer: &PeerId, io: &IoContext) {
 		let proto_version = match io.protocol_version(*peer).ok_or(Error::WrongNetwork) {
@@ -520,6 +576,7 @@ impl LightProtocol {
 			last_update: SteadyTime::now(),
 		});

+		trace!(target: "pip", "Sending status to peer {}", peer);
 		io.send(*peer, packet::STATUS, status_packet);
 	}
@@ -601,6 +658,7 @@ impl LightProtocol {
 			last_update: pending.last_update,
 			pending_requests: RequestSet::default(),
 			failed_requests: Vec::new(),
+			propagated_transactions: HashSet::new(),
 		}));

 		for handler in &self.handlers {
@@ -683,6 +741,8 @@ impl LightProtocol {
 		trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id);

 		// deserialize requests, check costs and request validity.
+		self.flow_params.recharge(&mut peer.local_credits);
+
 		peer.local_credits.deduct_cost(self.flow_params.base_cost())?;
 		for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) {
 			let request: Request = request_rlp.as_val()?;
@@ -709,6 +769,7 @@ impl LightProtocol {
 		});

 		trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id);
+		trace!(target: "pip", "Peer {} has {} credits remaining.", peer_id, peer.local_credits.current());

 		io.respond(packet::RESPONSE, {
 			let mut stream = RlpStream::new_list(3);
.expect("Error registering sync timer."); .expect("Error registering sync timer.");
io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL_MS) io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL_MS)
.expect("Error registering sync timer."); .expect("Error registering sync timer.");
io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL_MS)
.expect("Error registering sync timer.");
} }
fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
@@ -800,6 +863,7 @@ impl NetworkProtocolHandler for LightProtocol {
 		match timer {
 			TIMEOUT => self.timeout_check(io),
 			TICK_TIMEOUT => self.tick_handlers(io),
+			PROPAGATE_TIMEOUT => self.propagate_transactions(io),
 			_ => warn!(target: "pip", "received timeout on unknown token {}", timer),
 		}
 	}

View File

@@ -27,22 +27,29 @@ use std::iter::FromIterator;
 use request::Request;
 use request::Requests;
 use net::{timeout, ReqId};
+use util::U256;

 use time::{Duration, SteadyTime};

+// Request set entry: requests + cost.
+#[derive(Debug)]
+struct Entry(Requests, U256);
+
 /// Request set.
 #[derive(Debug)]
 pub struct RequestSet {
 	counter: u64,
+	cumulative_cost: U256,
 	base: Option<SteadyTime>,
 	ids: HashMap<ReqId, u64>,
-	reqs: BTreeMap<u64, Requests>,
+	reqs: BTreeMap<u64, Entry>,
 }

 impl Default for RequestSet {
 	fn default() -> Self {
 		RequestSet {
 			counter: 0,
+			cumulative_cost: 0.into(),
 			base: None,
 			ids: HashMap::new(),
 			reqs: BTreeMap::new(),
@@ -52,10 +59,12 @@ impl Default for RequestSet {
 impl RequestSet {
 	/// Push requests onto the stack.
-	pub fn insert(&mut self, req_id: ReqId, req: Requests, now: SteadyTime) {
+	pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: SteadyTime) {
 		let counter = self.counter;
+		self.cumulative_cost = self.cumulative_cost + cost;
+
 		self.ids.insert(req_id, counter);
-		self.reqs.insert(counter, req);
+		self.reqs.insert(counter, Entry(req, cost));

 		if self.reqs.keys().next().map_or(true, |x| *x == counter) {
 			self.base = Some(now);
@@ -71,7 +80,7 @@ impl RequestSet {
 			None => return None,
 		};

-		let req = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed");
+		let Entry(req, cost) = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed");

 		match self.reqs.keys().next() {
 			Some(k) if *k > id => self.base = Some(now),
@@ -79,6 +88,7 @@ impl RequestSet {
 			_ => {}
 		}

+		self.cumulative_cost = self.cumulative_cost - cost;
 		Some(req)
 	}
@@ -93,7 +103,7 @@ impl RequestSet {
 		let first_req = self.reqs.values().next()
 			.expect("base existing implies `reqs` non-empty; qed");

-		base + compute_timeout(&first_req) <= now
+		base + compute_timeout(&first_req.0) <= now
 	}

 	/// Collect all pending request ids.
@@ -108,6 +118,9 @@ impl RequestSet {
 	/// Whether the set is empty.
 	pub fn is_empty(&self) -> bool { self.len() == 0 }
+
+	/// The cumulative cost of all requests in the set.
+	pub fn cumulative_cost(&self) -> U256 { self.cumulative_cost }
 }

 // helper to calculate timeout for a specific set of requests.
@@ -141,8 +154,8 @@ mod tests {
 		let the_req = RequestBuilder::default().build();
 		let req_time = compute_timeout(&the_req);

-		req_set.insert(ReqId(0), the_req.clone(), test_begin);
-		req_set.insert(ReqId(1), the_req, test_begin + Duration::seconds(1));
+		req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin);
+		req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1));

 		assert_eq!(req_set.base, Some(test_begin));
@@ -153,4 +166,22 @@ mod tests {
 		assert!(!req_set.check_timeout(test_end));
 		assert!(req_set.check_timeout(test_end + Duration::seconds(1)));
 	}
+
+	#[test]
+	fn cumulative_cost() {
+		let the_req = RequestBuilder::default().build();
+		let test_begin = SteadyTime::now();
+		let test_end = test_begin + Duration::seconds(1);
+		let mut req_set = RequestSet::default();
+
+		for i in 0..5 {
+			req_set.insert(ReqId(i), the_req.clone(), 1.into(), test_begin);
+			assert_eq!(req_set.cumulative_cost, (i + 1).into());
+		}
+
+		for i in (0..5).rev() {
+			assert!(req_set.remove(&ReqId(i), test_end).is_some());
+			assert_eq!(req_set.cumulative_cost, i.into());
+		}
+	}
 }
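A note on how this ties into the protocol change earlier in this commit: the local credit estimate is only synced to the server's reported figure (`update_to`) when `cumulative_cost()` is zero, because while requests are in flight the two ledgers legitimately disagree by exactly the cost of the outstanding set. Below is a standalone model of the bookkeeping introduced here, with `u64` standing in for `U256` and all other names illustrative:

    use std::collections::BTreeMap;

    // mirrors the diff's Entry: a request payload plus the cost charged for it.
    struct Entry(Vec<u8>, u64);

    #[derive(Default)]
    struct RequestSet {
        counter: u64,
        cumulative_cost: u64,
        reqs: BTreeMap<u64, Entry>,
    }

    impl RequestSet {
        fn insert(&mut self, req: Vec<u8>, cost: u64) -> u64 {
            let id = self.counter;
            self.counter += 1;
            self.cumulative_cost += cost; // charge on insert...
            self.reqs.insert(id, Entry(req, cost));
            id
        }

        fn remove(&mut self, id: u64) -> Option<Vec<u8>> {
            let Entry(req, cost) = self.reqs.remove(&id)?;
            self.cumulative_cost -= cost; // ...refund on completion.
            Some(req)
        }
    }

    fn main() {
        let mut set = RequestSet::default();
        let a = set.insert(vec![1], 10);
        let b = set.insert(vec![2], 5);
        assert_eq!(set.cumulative_cost, 15);
        assert!(set.remove(a).is_some());
        assert!(set.remove(b).is_some());
        assert_eq!(set.cumulative_cost, 0); // only now is it safe to adopt the remote's figure
    }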

View File

@@ -600,8 +600,8 @@ fn id_guard() {
 	let mut pending_requests = RequestSet::default();

-	pending_requests.insert(req_id_1, req.clone(), ::time::SteadyTime::now());
-	pending_requests.insert(req_id_2, req, ::time::SteadyTime::now());
+	pending_requests.insert(req_id_1, req.clone(), 0.into(), ::time::SteadyTime::now());
+	pending_requests.insert(req_id_2, req, 1.into(), ::time::SteadyTime::now());

 	proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer {
 		local_credits: flow_params.create_credits(),
@@ -612,6 +612,7 @@ fn id_guard() {
 		last_update: ::time::SteadyTime::now(),
 		pending_requests: pending_requests,
 		failed_requests: Vec::new(),
+		propagated_transactions: Default::default(),
 	}));

 	// first, malformed responses.

View File

@@ -37,7 +37,7 @@ use rlp::RlpStream;
 use util::{Bytes, RwLock, Mutex, U256, H256};
 use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP};

-use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId};
+use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId};
 use cache::Cache;
 use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse};
@@ -57,15 +57,15 @@ impl Peer {
 				self.capabilities.serve_headers && self.status.head_num > req.num(),
 			Pending::HeaderByHash(_, _) => self.capabilities.serve_headers,
 			Pending::Block(ref req, _) =>
-				self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()),
+				self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.header.number()),
 			Pending::BlockReceipts(ref req, _) =>
-				self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.0.number()),
+				self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.0.number()),
 			Pending::Account(ref req, _) =>
-				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()),
+				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()),
 			Pending::Code(ref req, _) =>
-				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.block_id.1),
+				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.block_id.1),
 			Pending::TxProof(ref req, _) =>
-				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()),
+				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()),
 		}
 	}
 }
@@ -210,7 +210,7 @@ impl OnDemand {
 	/// it as easily.
 	pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver<encoded::Header> {
 		let (sender, receiver) = oneshot::channel();
-		match self.cache.lock().block_header(&req.0) {
+		match { self.cache.lock().block_header(&req.0) } {
 			Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE),
 			None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)),
 		}
@@ -232,11 +232,13 @@ impl OnDemand {
 			sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE);
 		} else {
-			match self.cache.lock().block_body(&req.hash) {
+			match { self.cache.lock().block_body(&req.hash) } {
 				Some(body) => {
 					let mut stream = RlpStream::new_list(3);
+					let body = body.rlp();
 					stream.append_raw(&req.header.into_inner(), 1);
-					stream.append_raw(&body.into_inner(), 2);
+					stream.append_raw(&body.at(0).as_raw(), 1);
+					stream.append_raw(&body.at(1).as_raw(), 1);

 					sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE);
 				}
@@ -255,7 +257,7 @@ impl OnDemand {
 		if req.0.receipts_root() == SHA3_NULL_RLP {
 			sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE);
 		} else {
-			match self.cache.lock().block_receipts(&req.0.hash()) {
+			match { self.cache.lock().block_receipts(&req.0.hash()) } {
 				Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE),
 				None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)),
 			}
@@ -303,23 +305,26 @@ impl OnDemand {
 		let complete = builder.build();

+		let kind = complete.requests()[0].kind();
 		for (id, peer) in self.peers.read().iter() {
 			if !peer.can_handle(&pending) { continue }
 			match ctx.request_from(*id, complete.clone()) {
 				Ok(req_id) => {
-					trace!(target: "on_demand", "Assigning request to peer {}", id);
+					trace!(target: "on_demand", "{}: Assigned {:?} to peer {}",
+						req_id, kind, id);
 					self.pending_requests.write().insert(
 						req_id,
 						pending,
 					);
 					return
 				}
+				Err(net::Error::NoCredits) => {}
 				Err(e) =>
 					trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
 			}
 		}

+		trace!(target: "on_demand", "No suitable peer for request");
 		self.orphaned_requests.write().push(pending);
 	}
@@ -353,6 +358,7 @@ impl OnDemand {
 		let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new());

+		trace!(target: "on_demand", "Attempting to dispatch {} orphaned requests.", to_dispatch.len());
 		for mut orphaned in to_dispatch {
 			let hung_up = match orphaned {
 				Pending::HeaderProof(_, ref mut sender) => match *sender {
@@ -397,10 +403,12 @@ impl Handler for OnDemand {
 	}

 	fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) {
-		let mut peers = self.peers.write();
-		if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) {
-			peer.status.update_from(&announcement);
-			peer.capabilities.update_from(&announcement);
+		{
+			let mut peers = self.peers.write();
+			if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) {
+				peer.status.update_from(&announcement);
+				peer.capabilities.update_from(&announcement);
+			}
 		}

 		self.dispatch_orphaned(ctx.as_basic());
@@ -422,6 +430,8 @@ impl Handler for OnDemand {
 			}
 		};

+		trace!(target: "on_demand", "Handling response for request {}, kind={:?}", req_id, response.kind());
+
 		// handle the response appropriately for the request.
 		// all branches which do not return early lead to disabling of the peer
 		// due to misbehavior.
@@ -441,7 +451,7 @@ impl Handler for OnDemand {
 					}
 					return
 				}
-				Err(e) => warn!("Error handling response for header request: {:?}", e),
+				Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e),
 			}
 		}
 	}
@@ -454,7 +464,7 @@ impl Handler for OnDemand {
 					let _ = sender.send(header);
 					return
 				}
-				Err(e) => warn!("Error handling response for header request: {:?}", e),
+				Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e),
 			}
 		}
 	}
@ -467,7 +477,7 @@ impl Handler for OnDemand {
let _ = sender.send(block); let _ = sender.send(block);
return return
} }
Err(e) => warn!("Error handling response for block request: {:?}", e), Err(e) => warn!(target: "on_demand", "Error handling response for block request: {:?}", e),
} }
} }
} }
@ -480,7 +490,7 @@ impl Handler for OnDemand {
let _ = sender.send(receipts); let _ = sender.send(receipts);
return return
} }
Err(e) => warn!("Error handling response for receipts request: {:?}", e), Err(e) => warn!(target: "on_demand", "Error handling response for receipts request: {:?}", e),
} }
} }
} }
@ -493,7 +503,7 @@ impl Handler for OnDemand {
let _ = sender.send(maybe_account); let _ = sender.send(maybe_account);
return return
} }
Err(e) => warn!("Error handling response for state request: {:?}", e), Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e),
} }
} }
} }
@ -504,7 +514,7 @@ impl Handler for OnDemand {
let _ = sender.send(response.code.clone()); let _ = sender.send(response.code.clone());
return return
} }
Err(e) => warn!("Error handling response for code request: {:?}", e), Err(e) => warn!(target: "on_demand", "Error handling response for code request: {:?}", e),
} }
} }
} }
@ -519,7 +529,7 @@ impl Handler for OnDemand {
let _ = sender.send(Err(err)); let _ = sender.send(Err(err));
return return
} }
ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), ProvedExecution::BadProof => warn!(target: "on_demand", "Error handling response for transaction proof request"),
} }
} }
} }
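Taken together, the on_demand changes above tighten the dispatch/response loop: each request is offered to the first capable peer (peers that are merely out of credits are now skipped silently via the new NoCredits arm), and anything unassignable lands in the orphaned queue for retry on the next connection or announcement. A minimal sketch of that assign-or-orphan pattern; Peer, Request, and can_serve are simplified stand-ins, not the real on_demand types:

// Sketch of the assign-or-orphan dispatch pattern; `Peer`, `Request` and
// `can_serve` are simplified stand-ins for the real on_demand types.
struct Peer { can_serve: bool }
struct Request { id: u64 }

fn dispatch(peers: &[Peer], req: Request, orphaned: &mut Vec<Request>) {
    for peer in peers {
        // skip peers that lack the capabilities (or credits) for this request
        if !peer.can_serve { continue }
        // the real code calls ctx.request_from and records the pending request
        println!("assigned request {} to a peer", req.id);
        return;
    }
    // no suitable peer right now; retried on the next connect/announcement
    orphaned.push(req);
}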


@ -151,7 +151,8 @@ impl Body {
// concatenate the header and the body. // concatenate the header and the body.
let mut stream = RlpStream::new_list(3); let mut stream = RlpStream::new_list(3);
stream.append_raw(self.header.rlp().as_raw(), 1); stream.append_raw(self.header.rlp().as_raw(), 1);
stream.append_raw(&body.rlp().as_raw(), 2); stream.append_raw(body.rlp().at(0).as_raw(), 1);
stream.append_raw(body.rlp().at(1).as_raw(), 1);
Ok(encoded::Block::new(stream.out())) Ok(encoded::Block::new(stream.out()))
} }
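The Body fix above is worth spelling out: an encoded block is a three-item RLP list [header, transactions, uncles], while a stored body is its own two-item list [transactions, uncles]. The old append_raw(..., 2) call pasted in the body's outer list wrapper while declaring it as the two inner items, corrupting the block RLP; the new code splices the two inner lists in individually. A toy version of the splice, assuming the rlp crate used in this codebase:

// Toy illustration: splice a 2-item body list into a 3-item block list.
use rlp::{RlpStream, UntrustedRlp};

fn concat_block(header_rlp: &[u8], body_rlp: &[u8]) -> Vec<u8> {
    let body = UntrustedRlp::new(body_rlp);
    let mut stream = RlpStream::new_list(3);
    stream.append_raw(header_rlp, 1);                   // header
    stream.append_raw(body.at(0).unwrap().as_raw(), 1); // transactions
    stream.append_raw(body.at(1).unwrap().as_raw(), 1); // uncles
    stream.out()
}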
@ -243,12 +244,14 @@ impl TransactionProof {
pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution { pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution {
let root = self.header.state_root(); let root = self.header.state_root();
let mut env_info = self.env_info.clone();
env_info.gas_limit = self.tx.gas.clone();
state::check_proof( state::check_proof(
state_items, state_items,
root, root,
&self.tx, &self.tx,
&*self.engine, &*self.engine,
&self.env_info, &env_info,
) )
} }
} }
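The check_response change pins env_info.gas_limit to the transaction's own gas before re-executing against the proof, and the prover applies the same override (see the prove_transaction change to the full client further down): proved execution is only reproducible if both sides run under identical environment info. In sketch form, with a simplified stand-in for ethcore's EnvInfo:

// Sketch: prover and checker both pin the env gas limit to the
// transaction's gas; `EnvInfo` here is a simplified stand-in.
#[derive(Clone)]
struct EnvInfo { gas_limit: u64 }

fn env_for_proof(base: &EnvInfo, tx_gas: u64) -> EnvInfo {
    let mut env_info = base.clone();
    env_info.gas_limit = tx_gas; // mirrors `env_info.gas_limit = self.tx.gas.clone()`
    env_info
}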


@ -244,7 +244,8 @@ pub enum CompleteRequest {
} }
impl Request { impl Request {
fn kind(&self) -> Kind { /// Get the request kind.
pub fn kind(&self) -> Kind {
match *self { match *self {
Request::Headers(_) => Kind::Headers, Request::Headers(_) => Kind::Headers,
Request::HeaderProof(_) => Kind::HeaderProof, Request::HeaderProof(_) => Kind::HeaderProof,
@ -435,7 +436,8 @@ impl Response {
} }
} }
fn kind(&self) -> Kind { /// Inspect the kind of this response.
pub fn kind(&self) -> Kind {
match *self { match *self {
Response::Headers(_) => Kind::Headers, Response::Headers(_) => Kind::Headers,
Response::HeaderProof(_) => Kind::HeaderProof, Response::HeaderProof(_) => Kind::HeaderProof,
@ -726,7 +728,6 @@ pub mod header_proof {
impl Decodable for Response { impl Decodable for Response {
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> { fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
Ok(Response { Ok(Response {
proof: rlp.list_at(0)?, proof: rlp.list_at(0)?,
hash: rlp.val_at(1)?, hash: rlp.val_at(1)?,
@ -737,12 +738,10 @@ pub mod header_proof {
impl Encodable for Response { impl Encodable for Response {
fn rlp_append(&self, s: &mut RlpStream) { fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(3).begin_list(self.proof.len()); s.begin_list(3)
for item in &self.proof { .append_list::<Vec<u8>,_>(&self.proof[..])
s.append_list(&item); .append(&self.hash)
} .append(&self.td);
s.append(&self.hash).append(&self.td);
} }
} }
} }
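The rlp_append rewrites in this module are mechanical: append_list writes the whole proof vector as a single nested list item, which is exactly the shape the unchanged decoder reads back with list_at(0). A self-contained round-trip of that encode/decode symmetry, assuming the same rlp crate API the diff itself uses:

// Round-trip sketch of the nested-list shape used for `proof` fields.
use rlp::{RlpStream, UntrustedRlp};

fn proof_roundtrip() {
    let proof: Vec<Vec<u8>> = vec![vec![1, 2, 3], vec![4, 5, 6]];

    // encode: one nested list item, as in the new rlp_append bodies
    let mut s = RlpStream::new_list(1);
    s.append_list::<Vec<u8>, _>(&proof[..]);
    let bytes = s.out();

    // decode: the mirror of the decoder's `rlp.list_at(0)?`
    let rlp = UntrustedRlp::new(&bytes);
    let decoded: Vec<Vec<u8>> = rlp.list_at(0).unwrap();
    assert_eq!(proof, decoded);
}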
@ -826,7 +825,6 @@ pub mod block_receipts {
impl Decodable for Response { impl Decodable for Response {
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> { fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
Ok(Response { Ok(Response {
receipts: rlp.as_list()?, receipts: rlp.as_list()?,
}) })
@ -923,8 +921,8 @@ pub mod block_body {
use ethcore::transaction::UnverifiedTransaction; use ethcore::transaction::UnverifiedTransaction;
// check body validity. // check body validity.
let _: Vec<FullHeader> = rlp.list_at(0)?; let _: Vec<UnverifiedTransaction> = rlp.list_at(0)?;
let _: Vec<UnverifiedTransaction> = rlp.list_at(1)?; let _: Vec<FullHeader> = rlp.list_at(1)?;
Ok(Response { Ok(Response {
body: encoded::Body::new(rlp.as_raw().to_owned()), body: encoded::Body::new(rlp.as_raw().to_owned()),
@ -1063,12 +1061,9 @@ pub mod account {
impl Encodable for Response { impl Encodable for Response {
fn rlp_append(&self, s: &mut RlpStream) { fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(5).begin_list(self.proof.len()); s.begin_list(5)
for item in &self.proof { .append_list::<Vec<u8>,_>(&self.proof[..])
s.append_list(&item); .append(&self.nonce)
}
s.append(&self.nonce)
.append(&self.balance) .append(&self.balance)
.append(&self.code_hash) .append(&self.code_hash)
.append(&self.storage_root); .append(&self.storage_root);
@ -1207,11 +1202,9 @@ pub mod storage {
impl Encodable for Response { impl Encodable for Response {
fn rlp_append(&self, s: &mut RlpStream) { fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(2).begin_list(self.proof.len()); s.begin_list(2)
for item in &self.proof { .append_list::<Vec<u8>,_>(&self.proof[..])
s.append_list(&item); .append(&self.value);
}
s.append(&self.value);
} }
} }
} }
@ -1486,9 +1479,16 @@ mod tests {
fn check_roundtrip<T>(val: T) fn check_roundtrip<T>(val: T)
where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug
{ {
// check as single value.
let bytes = ::rlp::encode(&val); let bytes = ::rlp::encode(&val);
let new_val: T = ::rlp::decode(&bytes); let new_val: T = ::rlp::decode(&bytes);
assert_eq!(val, new_val); assert_eq!(val, new_val);
// check as list containing single value.
let list = [val];
let bytes = ::rlp::encode_list(&list);
let new_list: Vec<T> = ::rlp::decode_list(&bytes);
assert_eq!(&list, &new_list[..]);
} }
#[test] #[test]
@ -1540,7 +1540,7 @@ mod tests {
let full_req = Request::HeaderProof(req.clone()); let full_req = Request::HeaderProof(req.clone());
let res = HeaderProofResponse { let res = HeaderProofResponse {
proof: Vec::new(), proof: vec![vec![1, 2, 3], vec![4, 5, 6]],
hash: Default::default(), hash: Default::default(),
td: 100.into(), td: 100.into(),
}; };
@ -1572,6 +1572,7 @@ mod tests {
#[test] #[test]
fn body_roundtrip() { fn body_roundtrip() {
use ethcore::transaction::{Transaction, UnverifiedTransaction};
let req = IncompleteBodyRequest { let req = IncompleteBodyRequest {
hash: Field::Scalar(Default::default()), hash: Field::Scalar(Default::default()),
}; };
@ -1579,8 +1580,12 @@ mod tests {
let full_req = Request::Body(req.clone()); let full_req = Request::Body(req.clone());
let res = BodyResponse { let res = BodyResponse {
body: { body: {
let header = ::ethcore::header::Header::default();
let tx = UnverifiedTransaction::from(Transaction::default().fake_sign(Default::default()));
let mut stream = RlpStream::new_list(2); let mut stream = RlpStream::new_list(2);
stream.begin_list(0).begin_list(0); stream.begin_list(2).append(&tx).append(&tx)
.begin_list(1).append(&header);
::ethcore::encoded::Body::new(stream.out()) ::ethcore::encoded::Body::new(stream.out())
}, },
}; };
@ -1601,7 +1606,7 @@ mod tests {
let full_req = Request::Account(req.clone()); let full_req = Request::Account(req.clone());
let res = AccountResponse { let res = AccountResponse {
proof: Vec::new(), proof: vec![vec![1, 2, 3], vec![4, 5, 6]],
nonce: 100.into(), nonce: 100.into(),
balance: 123456.into(), balance: 123456.into(),
code_hash: Default::default(), code_hash: Default::default(),
@ -1625,7 +1630,7 @@ mod tests {
let full_req = Request::Storage(req.clone()); let full_req = Request::Storage(req.clone());
let res = StorageResponse { let res = StorageResponse {
proof: Vec::new(), proof: vec![vec![1, 2, 3], vec![4, 5, 6]],
value: H256::default(), value: H256::default(),
}; };
let full_res = Response::Storage(res.clone()); let full_res = Response::Storage(res.clone());
@ -1707,4 +1712,31 @@ mod tests {
assert_eq!(rlp.val_at::<usize>(0).unwrap(), 100usize); assert_eq!(rlp.val_at::<usize>(0).unwrap(), 100usize);
assert_eq!(rlp.list_at::<Request>(1).unwrap(), reqs); assert_eq!(rlp.list_at::<Request>(1).unwrap(), reqs);
} }
#[test]
fn responses_vec() {
let mut stream = RlpStream::new_list(2);
stream.begin_list(0).begin_list(0);
let body = ::ethcore::encoded::Body::new(stream.out());
let reqs = vec![
Response::Headers(HeadersResponse { headers: vec![] }),
Response::HeaderProof(HeaderProofResponse { proof: vec![], hash: Default::default(), td: 100.into()}),
Response::Receipts(ReceiptsResponse { receipts: vec![Default::default()] }),
Response::Body(BodyResponse { body: body }),
Response::Account(AccountResponse {
proof: vec![],
nonce: 100.into(),
balance: 123.into(),
code_hash: Default::default(),
storage_root: Default::default()
}),
Response::Storage(StorageResponse { proof: vec![], value: H256::default() }),
Response::Code(CodeResponse { code: vec![1, 2, 3, 4, 5] }),
Response::Execution(ExecutionResponse { items: vec![] }),
];
let raw = ::rlp::encode_list(&reqs);
assert_eq!(::rlp::decode_list::<Response>(&raw), reqs);
}
} }


@ -1627,10 +1627,12 @@ impl ::client::ProvingBlockChainClient for Client {
} }
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<Vec<DBValue>> { fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<Vec<DBValue>> {
let (state, env_info) = match (self.state_at(id), self.env_info(id)) { let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) {
(Some(s), Some(e)) => (s, e), (Some(s), Some(e)) => (s, e),
_ => return None, _ => return None,
}; };
env_info.gas_limit = transaction.gas.clone();
let mut jdb = self.state_db.lock().journal_db().boxed_clone(); let mut jdb = self.state_db.lock().journal_db().boxed_clone();
let backend = state::backend::Proving::new(jdb.as_hashdb_mut()); let backend = state::backend::Proving::new(jdb.as_hashdb_mut());


@ -26,7 +26,7 @@ use verification::{VerifierType, QueueConfig};
use util::{journaldb, CompactionProfile}; use util::{journaldb, CompactionProfile};
/// Client state db compaction profile /// Client state db compaction profile
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq, Clone)]
pub enum DatabaseCompactionProfile { pub enum DatabaseCompactionProfile {
/// Try to determine compaction profile automatically /// Try to determine compaction profile automatically
Auto, Auto,


@ -38,8 +38,10 @@ pub const COL_TRACE: Option<u32> = Some(4);
pub const COL_ACCOUNT_BLOOM: Option<u32> = Some(5); pub const COL_ACCOUNT_BLOOM: Option<u32> = Some(5);
/// Column for general information from the local node which can persist. /// Column for general information from the local node which can persist.
pub const COL_NODE_INFO: Option<u32> = Some(6); pub const COL_NODE_INFO: Option<u32> = Some(6);
/// Column for the light client chain.
pub const COL_LIGHT_CHAIN: Option<u32> = Some(7);
/// Number of columns in DB /// Number of columns in DB
pub const NUM_COLUMNS: Option<u32> = Some(7); pub const NUM_COLUMNS: Option<u32> = Some(8);
/// Modes for updating caches. /// Modes for updating caches.
#[derive(Clone, Copy)] #[derive(Clone, Copy)]


@ -16,6 +16,8 @@
//! Database migrations. //! Database migrations.
use util::migration::ChangeColumns;
pub mod state; pub mod state;
pub mod blocks; pub mod blocks;
pub mod extras; pub mod extras;
@ -27,5 +29,18 @@ pub use self::v9::Extract;
mod v10; mod v10;
pub use self::v10::ToV10; pub use self::v10::ToV10;
mod v11; /// The migration from v10 to v11.
pub use self::v11::TO_V11; /// Adds a column for node info.
pub const TO_V11: ChangeColumns = ChangeColumns {
pre_columns: Some(6),
post_columns: Some(7),
version: 11,
};
/// The migration from v11 to v12.
/// Adds a column for light chain storage.
pub const TO_V12: ChangeColumns = ChangeColumns {
pre_columns: Some(7),
post_columns: Some(8),
version: 12,
};
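Replacing the v11 module with plain ChangeColumns constants makes column additions declarative: each new column means bumping NUM_COLUMNS next to its COL_* constant and declaring one matching migration. Purely for illustration, a hypothetical next step in the same pattern (TO_V13 does not exist in this commit):

// Hypothetical illustration only: a v12 -> v13 migration adding one more
// column, following the same ChangeColumns pattern as TO_V11/TO_V12 above.
pub const TO_V13: ChangeColumns = ChangeColumns {
    pre_columns: Some(8),
    post_columns: Some(9),
    version: 13,
};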


@ -23,11 +23,10 @@ use snapshot::Error;
use util::{U256, H256, Bytes, HashDB, SHA3_EMPTY, SHA3_NULL_RLP}; use util::{U256, H256, Bytes, HashDB, SHA3_EMPTY, SHA3_NULL_RLP};
use util::trie::{TrieDB, Trie}; use util::trie::{TrieDB, Trie};
use rlp::{RlpStream, UntrustedRlp}; use rlp::{RlpStream, UntrustedRlp};
use itertools::Itertools;
use std::collections::HashSet; use std::collections::HashSet;
// An empty account -- these are replaced with RLP null data for a space optimization. // An empty account -- these were replaced with RLP null data for a space optimization in v1.
const ACC_EMPTY: BasicAccount = BasicAccount { const ACC_EMPTY: BasicAccount = BasicAccount {
nonce: U256([0, 0, 0, 0]), nonce: U256([0, 0, 0, 0]),
balance: U256([0, 0, 0, 0]), balance: U256([0, 0, 0, 0]),
@ -62,28 +61,19 @@ impl CodeState {
} }
// walk the account's storage trie, returning a vector of RLP items containing the // walk the account's storage trie, returning a vector of RLP items containing the
// account properties and the storage. Each item contains at most `max_storage_items` // account address hash, account properties and the storage. Each item contains at most `max_storage_items`
// storage records split according to snapshot format definition. // storage records split according to snapshot format definition.
pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, max_storage_items: usize) -> Result<Vec<Bytes>, Error> { pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, first_chunk_size: usize, max_chunk_size: usize) -> Result<Vec<Bytes>, Error> {
if acc == &ACC_EMPTY {
return Ok(vec![::rlp::NULL_RLP.to_vec()]);
}
let db = TrieDB::new(acct_db, &acc.storage_root)?; let db = TrieDB::new(acct_db, &acc.storage_root)?;
let mut chunks = Vec::new();
let mut db_iter = db.iter()?;
let mut target_chunk_size = first_chunk_size;
let mut account_stream = RlpStream::new_list(2);
let mut leftover: Option<Vec<u8>> = None;
loop {
account_stream.append(account_hash);
account_stream.begin_list(5);
let chunks = db.iter()?.chunks(max_storage_items);
let pair_chunks = chunks.into_iter().map(|chunk| chunk.collect());
pair_chunks.pad_using(1, |_| Vec::new()).map(|pairs| {
let mut stream = RlpStream::new_list(pairs.len());
for r in pairs {
let (k, v) = r?;
stream.begin_list(2).append(&k).append(&&*v);
}
let pairs_rlp = stream.out();
let mut account_stream = RlpStream::new_list(5);
account_stream.append(&acc.nonce) account_stream.append(&acc.nonce)
.append(&acc.balance); .append(&acc.balance);
@ -105,9 +95,49 @@ pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut Hash
} }
} }
account_stream.append_raw(&pairs_rlp, 1); account_stream.begin_unbounded_list();
Ok(account_stream.out()) if account_stream.len() > target_chunk_size {
}).collect() // account does not fit, push an empty record to mark a new chunk
target_chunk_size = max_chunk_size;
chunks.push(Vec::new());
}
if let Some(pair) = leftover.take() {
if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) {
return Err(Error::ChunkTooSmall);
}
}
loop {
match db_iter.next() {
Some(Ok((k, v))) => {
let pair = {
let mut stream = RlpStream::new_list(2);
stream.append(&k).append(&&*v);
stream.drain()
};
if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) {
account_stream.complete_unbounded_list();
let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2));
chunks.push(stream.out());
target_chunk_size = max_chunk_size;
leftover = Some(pair.to_vec());
break;
}
},
Some(Err(e)) => {
return Err(e.into());
},
None => {
account_stream.complete_unbounded_list();
let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2));
chunks.push(stream.out());
return Ok(chunks);
}
}
}
}
} }
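The heart of the rewritten to_fat_rlps is append_raw_checked: a storage pair is appended only while the stream stays under the current chunk budget; on refusal, the unbounded list is completed, flushed as a chunk, and the pair carried over as leftover. The first chunk gets a smaller budget (first_chunk_size) so an account can share a chunk with data already buffered; later chunks get max_chunk_size. A stripped-down sketch of the same budgeting over plain byte buffers (names are illustrative, not the real API):

// Stripped-down sketch of budgeted chunking: append items while they fit,
// flush a chunk and carry the item over when they do not.
fn chunk_items(items: Vec<Vec<u8>>, first_budget: usize, max_budget: usize) -> Vec<Vec<Vec<u8>>> {
    let mut chunks = Vec::new();
    let mut current: Vec<Vec<u8>> = Vec::new();
    let (mut size, mut budget) = (0, first_budget);

    for item in items {
        if size + item.len() > budget && !current.is_empty() {
            // current chunk is full: flush it and start a new one
            chunks.push(std::mem::replace(&mut current, Vec::new()));
            size = 0;
            budget = max_budget; // only the first chunk uses the smaller budget
        }
        size += item.len();
        current.push(item);
    }
    chunks.push(current);
    chunks
}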
// decode a fat rlp, and rebuild the storage trie as we go. // decode a fat rlp, and rebuild the storage trie as we go.
@ -181,7 +211,7 @@ mod tests {
use snapshot::tests::helpers::fill_storage; use snapshot::tests::helpers::fill_storage;
use util::sha3::{SHA3_EMPTY, SHA3_NULL_RLP}; use util::sha3::{SHA3_EMPTY, SHA3_NULL_RLP};
use util::{Address, H256, HashDB, DBValue}; use util::{Address, H256, HashDB, DBValue, Hashable};
use rlp::UntrustedRlp; use rlp::UntrustedRlp;
use std::collections::HashSet; use std::collections::HashSet;
@ -203,8 +233,8 @@ mod tests {
let thin_rlp = ::rlp::encode(&account); let thin_rlp = ::rlp::encode(&account);
assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account); assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account);
let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value()).unwrap(); let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap();
let fat_rlp = UntrustedRlp::new(&fat_rlps[0]); let fat_rlp = UntrustedRlp::new(&fat_rlps[0]).at(1).unwrap();
assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account);
} }
@ -228,8 +258,8 @@ mod tests {
let thin_rlp = ::rlp::encode(&account); let thin_rlp = ::rlp::encode(&account);
assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account); assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account);
let fat_rlp = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value()).unwrap(); let fat_rlp = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap();
let fat_rlp = UntrustedRlp::new(&fat_rlp[0]); let fat_rlp = UntrustedRlp::new(&fat_rlp[0]).at(1).unwrap();
assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account);
} }
@ -253,11 +283,11 @@ mod tests {
let thin_rlp = ::rlp::encode(&account); let thin_rlp = ::rlp::encode(&account);
assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account); assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account);
let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 100).unwrap(); let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 500, 1000).unwrap();
let mut root = SHA3_NULL_RLP; let mut root = SHA3_NULL_RLP;
let mut restored_account = None; let mut restored_account = None;
for rlp in fat_rlps { for rlp in fat_rlps {
let fat_rlp = UntrustedRlp::new(&rlp); let fat_rlp = UntrustedRlp::new(&rlp).at(1).unwrap();
restored_account = Some(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, root).unwrap().0); restored_account = Some(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, root).unwrap().0);
root = restored_account.as_ref().unwrap().storage_root.clone(); root = restored_account.as_ref().unwrap().storage_root.clone();
} }
@ -297,12 +327,12 @@ mod tests {
let mut used_code = HashSet::new(); let mut used_code = HashSet::new();
let fat_rlp1 = to_fat_rlps(&account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value()).unwrap(); let fat_rlp1 = to_fat_rlps(&addr1.sha3(), &account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
let fat_rlp2 = to_fat_rlps(&account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value()).unwrap(); let fat_rlp2 = to_fat_rlps(&addr2.sha3(), &account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
assert_eq!(used_code.len(), 1); assert_eq!(used_code.len(), 1);
let fat_rlp1 = UntrustedRlp::new(&fat_rlp1[0]); let fat_rlp1 = UntrustedRlp::new(&fat_rlp1[0]).at(1).unwrap();
let fat_rlp2 = UntrustedRlp::new(&fat_rlp2[0]); let fat_rlp2 = UntrustedRlp::new(&fat_rlp2[0]).at(1).unwrap();
let (acc, maybe_code) = from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr2), fat_rlp2, H256::zero()).unwrap(); let (acc, maybe_code) = from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr2), fat_rlp2, H256::zero()).unwrap();
assert!(maybe_code.is_none()); assert!(maybe_code.is_none());
@ -316,9 +346,6 @@ mod tests {
#[test] #[test]
fn encoding_empty_acc() { fn encoding_empty_acc() {
let mut db = get_temp_state_db(); let mut db = get_temp_state_db();
let mut used_code = HashSet::new();
assert_eq!(to_fat_rlps(&ACC_EMPTY, &AccountDB::new(db.as_hashdb(), &Address::default()), &mut used_code, usize::max_value()).unwrap(), vec![::rlp::NULL_RLP.to_vec()]);
assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &Address::default()), UntrustedRlp::new(&::rlp::NULL_RLP), H256::zero()).unwrap(), (ACC_EMPTY, None)); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &Address::default()), UntrustedRlp::new(&::rlp::NULL_RLP), H256::zero()).unwrap(), (ACC_EMPTY, None));
} }
} }


@ -55,6 +55,8 @@ pub enum Error {
Io(::std::io::Error), Io(::std::io::Error),
/// Snapshot version is not supported. /// Snapshot version is not supported.
VersionNotSupported(u64), VersionNotSupported(u64),
/// Max chunk size is too small to fit basic account data.
ChunkTooSmall,
} }
impl fmt::Display for Error { impl fmt::Display for Error {
@ -76,6 +78,7 @@ impl fmt::Display for Error {
Error::Decoder(ref err) => err.fmt(f), Error::Decoder(ref err) => err.fmt(f),
Error::Trie(ref err) => err.fmt(f), Error::Trie(ref err) => err.fmt(f),
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supported.", ver), Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supported.", ver),
Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
} }
} }
} }


@ -83,9 +83,6 @@ mod traits {
// Try to have chunks be around 4MB (before compression) // Try to have chunks be around 4MB (before compression)
const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
// Try to have chunks be around 4MB (before compression)
const MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD: usize = 80_000;
// How many blocks to include in a snapshot, starting from the head of the chain. // How many blocks to include in a snapshot, starting from the head of the chain.
const SNAPSHOT_BLOCKS: u64 = 30000; const SNAPSHOT_BLOCKS: u64 = 30000;
@ -305,20 +302,9 @@ impl<'a> StateChunker<'a> {
// //
// If the buffer is greater than the desired chunk size, // If the buffer is greater than the desired chunk size,
// this will write out the data to disk. // this will write out the data to disk.
fn push(&mut self, account_hash: Bytes, data: Bytes, force_chunk: bool) -> Result<(), Error> { fn push(&mut self, data: Bytes) -> Result<(), Error> {
let pair = { self.cur_size += data.len();
let mut stream = RlpStream::new_list(2); self.rlps.push(data);
stream.append(&account_hash).append_raw(&data, 1);
stream.out()
};
if force_chunk || self.cur_size + pair.len() >= PREFERRED_CHUNK_SIZE {
self.write_chunk()?;
}
self.cur_size += pair.len();
self.rlps.push(pair);
Ok(()) Ok(())
} }
@ -348,6 +334,11 @@ impl<'a> StateChunker<'a> {
Ok(()) Ok(())
} }
// Get current chunk size.
fn chunk_size(&self) -> usize {
self.cur_size
}
} }
/// Walk the given state database starting from the given root, /// Walk the given state database starting from the given root,
@ -377,9 +368,12 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter +
let account_db = AccountDB::from_hash(db, account_key_hash); let account_db = AccountDB::from_hash(db, account_key_hash);
let fat_rlps = account::to_fat_rlps(&account, &account_db, &mut used_code, MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD)?; let fat_rlps = account::to_fat_rlps(&account_key_hash, &account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE - chunker.chunk_size(), PREFERRED_CHUNK_SIZE)?;
for (i, fat_rlp) in fat_rlps.into_iter().enumerate() { for (i, fat_rlp) in fat_rlps.into_iter().enumerate() {
chunker.push(account_key.clone(), fat_rlp, i > 0)?; if i > 0 {
chunker.write_chunk()?;
}
chunker.push(fat_rlp)?;
} }
} }


@ -122,10 +122,9 @@ fn get_code_from_prev_chunk() {
let mut db = MemoryDB::new(); let mut db = MemoryDB::new();
AccountDBMut::from_hash(&mut db, hash).insert(&code[..]); AccountDBMut::from_hash(&mut db, hash).insert(&code[..]);
let fat_rlp = account::to_fat_rlps(&acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value()).unwrap(); let fat_rlp = account::to_fat_rlps(&hash, &acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
let mut stream = RlpStream::new_list(1); let mut stream = RlpStream::new_list(1);
stream.begin_list(2).append(&hash).append_raw(&fat_rlp[0], 1); stream.append_raw(&fat_rlp[0], 1);
stream.out() stream.out()
}; };


@ -1,6 +1,6 @@
{ {
"name": "parity.js", "name": "parity.js",
"version": "1.7.45", "version": "1.7.47",
"main": "release/index.js", "main": "release/index.js",
"jsnext:main": "src/index.js", "jsnext:main": "src/index.js",
"author": "Parity Team <admin@parity.io>", "author": "Parity Team <admin@parity.io>",
@ -176,7 +176,7 @@
"geopattern": "1.2.3", "geopattern": "1.2.3",
"isomorphic-fetch": "2.2.1", "isomorphic-fetch": "2.2.1",
"js-sha3": "0.5.5", "js-sha3": "0.5.5",
"keythereum": "0.4.3", "keythereum": "0.4.6",
"lodash": "4.17.2", "lodash": "4.17.2",
"loglevel": "1.4.1", "loglevel": "1.4.1",
"marked": "0.3.6", "marked": "0.3.6",


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
import { keythereum } from '../ethkey'; import { createKeyObject, decryptPrivateKey } from '../ethkey';
export default class Account { export default class Account {
constructor (persist, data) { constructor (persist, data) {
@ -31,12 +31,14 @@ export default class Account {
} }
isValidPassword (password) { isValidPassword (password) {
try { return decryptPrivateKey(this._keyObject, password)
keythereum.recover(Buffer.from(password), this._keyObject); .then((privateKey) => {
return true; if (!privateKey) {
} catch (e) { return false;
return false; }
}
return true;
});
} }
get address () { get address () {
@ -68,21 +70,23 @@ export default class Account {
} }
decryptPrivateKey (password) { decryptPrivateKey (password) {
return keythereum.recover(Buffer.from(password), this._keyObject); return decryptPrivateKey(this._keyObject, password);
}
changePassword (key, password) {
return createKeyObject(key, password).then((keyObject) => {
this._keyObject = keyObject;
this._persist();
});
} }
static fromPrivateKey (persist, key, password) { static fromPrivateKey (persist, key, password) {
const iv = keythereum.crypto.randomBytes(16); return createKeyObject(key, password).then((keyObject) => {
const salt = keythereum.crypto.randomBytes(32); const account = new Account(persist, { keyObject });
// Keythereum will fail if `password` is an empty string return account;
password = Buffer.from(password); });
const keyObject = keythereum.dump(password, key, salt, iv);
const account = new Account(persist, { keyObject });
return account;
} }
toJSON () { toJSON () {


@ -38,14 +38,23 @@ export default class Accounts {
create (secret, password) { create (secret, password) {
const privateKey = Buffer.from(secret.slice(2), 'hex'); const privateKey = Buffer.from(secret.slice(2), 'hex');
const account = Account.fromPrivateKey(this.persist, privateKey, password);
this._store.push(account); return Account
this.lastAddress = account.address; .fromPrivateKey(this.persist, privateKey, password)
.then((account) => {
const { address } = account;
this.persist(); if (this._store.find((account) => account.address === address)) {
throw new Error(`Account ${address} already exists!`);
}
return account.address; this._store.push(account);
this.lastAddress = address;
this.persist();
return account.address;
});
} }
set lastAddress (value) { set lastAddress (value) {
@ -73,28 +82,41 @@ export default class Accounts {
remove (address, password) { remove (address, password) {
address = address.toLowerCase(); address = address.toLowerCase();
const account = this.get(address);
if (!account) {
return false;
}
return account
.isValidPassword(password)
.then((isValid) => {
if (!isValid) {
return false;
}
if (address === this.lastAddress) {
this.lastAddress = NULL_ADDRESS;
}
this.removeUnsafe(address);
return true;
});
}
removeUnsafe (address) {
address = address.toLowerCase();
const index = this._store.findIndex((account) => account.address === address); const index = this._store.findIndex((account) => account.address === address);
if (index === -1) { if (index === -1) {
return false; return;
}
const account = this._store[index];
if (!account.isValidPassword(password)) {
console.log('invalid password');
return false;
}
if (address === this.lastAddress) {
this.lastAddress = NULL_ADDRESS;
} }
this._store.splice(index, 1); this._store.splice(index, 1);
this.persist(); this.persist();
return true;
} }
mapArray (mapper) { mapArray (mapper) {


@ -14,31 +14,35 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
// Allow a web worker in the browser, with a fallback for Node.js import workerPool from './workerPool';
const hasWebWorkers = typeof Worker !== 'undefined';
const KeyWorker = hasWebWorkers ? require('worker-loader!./worker')
: require('./worker').KeyWorker;
// Local accounts should never be used outside of the browser export function createKeyObject (key, password) {
export let keythereum = null; return workerPool.getWorker().action('createKeyObject', { key, password })
.then((obj) => JSON.parse(obj));
}
if (hasWebWorkers) { export function decryptPrivateKey (keyObject, password) {
require('keythereum/dist/keythereum'); return workerPool
.getWorker()
.action('decryptPrivateKey', { keyObject, password })
.then((privateKey) => {
if (privateKey) {
return Buffer.from(privateKey);
}
keythereum = window.keythereum; return null;
});
} }
export function phraseToAddress (phrase) { export function phraseToAddress (phrase) {
return phraseToWallet(phrase).then((wallet) => wallet.address); return phraseToWallet(phrase)
.then((wallet) => wallet.address);
} }
export function phraseToWallet (phrase) { export function phraseToWallet (phrase) {
return new Promise((resolve, reject) => { return workerPool.getWorker().action('phraseToWallet', phrase);
const worker = new KeyWorker(); }
worker.postMessage(phrase); export function verifySecret (secret) {
worker.onmessage = ({ data }) => { return workerPool.getWorker().action('verifySecret', secret);
resolve(data);
};
});
} }


@ -14,58 +14,104 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
import { keccak_256 as keccak256 } from 'js-sha3';
import secp256k1 from 'secp256k1/js'; import secp256k1 from 'secp256k1/js';
import { keccak_256 as keccak256 } from 'js-sha3';
import { bytesToHex } from '~/api/util/format';
const isWorker = typeof self !== 'undefined';
// Stay compatible between environments // Stay compatible between environments
if (typeof self !== 'object') { if (!isWorker) {
const scope = typeof global === 'undefined' ? window : global; const scope = typeof global === 'undefined' ? window : global;
scope.self = scope; scope.self = scope;
} }
function bytesToHex (bytes) { // keythereum should never be used outside of the browser
return '0x' + Array.from(bytes).map(n => ('0' + n.toString(16)).slice(-2)).join(''); let keythereum = null;
if (isWorker) {
require('keythereum/dist/keythereum');
keythereum = self.keythereum;
} }
// Logic ported from /ethkey/src/brain.rs function route ({ action, payload }) {
function phraseToWallet (phrase) { if (action in actions) {
let secret = keccak256.array(phrase); return actions[action](payload);
for (let i = 0; i < 16384; i++) {
secret = keccak256.array(secret);
} }
while (true) { return null;
secret = keccak256.array(secret); }
const secretBuf = Buffer.from(secret); const actions = {
phraseToWallet (phrase) {
let secret = keccak256.array(phrase);
if (secp256k1.privateKeyVerify(secretBuf)) { for (let i = 0; i < 16384; i++) {
// No compression, slice out last 64 bytes secret = keccak256.array(secret);
const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64); }
const address = keccak256.array(publicBuf).slice(12);
if (address[0] !== 0) { while (true) {
continue; secret = keccak256.array(secret);
const secretBuf = Buffer.from(secret);
if (secp256k1.privateKeyVerify(secretBuf)) {
// No compression, slice out last 64 bytes
const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64);
const address = keccak256.array(publicBuf).slice(12);
if (address[0] !== 0) {
continue;
}
const wallet = {
secret: bytesToHex(secretBuf),
public: bytesToHex(publicBuf),
address: bytesToHex(address)
};
return wallet;
} }
}
},
const wallet = { verifySecret (secret) {
secret: bytesToHex(secretBuf), const key = Buffer.from(secret.slice(2), 'hex');
public: bytesToHex(publicBuf),
address: bytesToHex(address)
};
return wallet; return secp256k1.privateKeyVerify(key);
},
createKeyObject ({ key, password }) {
key = Buffer.from(key);
password = Buffer.from(password);
const iv = keythereum.crypto.randomBytes(16);
const salt = keythereum.crypto.randomBytes(32);
const keyObject = keythereum.dump(password, key, salt, iv);
return JSON.stringify(keyObject);
},
decryptPrivateKey ({ keyObject, password }) {
password = Buffer.from(password);
try {
const key = keythereum.recover(password, keyObject);
// Convert to array to safely send from the worker
return Array.from(key);
} catch (e) {
return null;
} }
} }
} };
self.onmessage = function ({ data }) { self.onmessage = function ({ data }) {
const wallet = phraseToWallet(data); const result = route(data);
postMessage(wallet); postMessage(result);
close();
}; };
// Emulate a web worker in Node.js // Emulate a web worker in Node.js
@ -73,9 +119,9 @@ class KeyWorker {
postMessage (data) { postMessage (data) {
// Force async // Force async
setTimeout(() => { setTimeout(() => {
const wallet = phraseToWallet(data); const result = route(data);
this.onmessage({ data: wallet }); this.onmessage({ data: result });
}, 0); }, 0);
} }


@ -0,0 +1,61 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
// Allow a web worker in the browser, with a fallback for Node.js
const hasWebWorkers = typeof Worker !== 'undefined';
const KeyWorker = hasWebWorkers ? require('worker-loader!./worker')
: require('./worker').KeyWorker;
class WorkerContainer {
busy = false;
_worker = new KeyWorker();
action (action, payload) {
if (this.busy) {
throw new Error('Cannot issue an action on a busy worker!');
}
this.busy = true;
return new Promise((resolve, reject) => {
this._worker.postMessage({ action, payload });
this._worker.onmessage = ({ data }) => {
this.busy = false;
resolve(data);
};
});
}
}
class WorkerPool {
pool = [];
getWorker () {
let container = this.pool.find((container) => !container.busy);
if (container) {
return container;
}
container = new WorkerContainer();
this.pool.push(container);
return container;
}
}
export default new WorkerPool();


@ -19,7 +19,7 @@ import accounts from './accounts';
import transactions from './transactions'; import transactions from './transactions';
import { Middleware } from '../transport'; import { Middleware } from '../transport';
import { inNumber16 } from '../format/input'; import { inNumber16 } from '../format/input';
import { phraseToWallet, phraseToAddress } from './ethkey'; import { phraseToWallet, phraseToAddress, verifySecret } from './ethkey';
import { randomPhrase } from '@parity/wordlist'; import { randomPhrase } from '@parity/wordlist';
export default class LocalAccountsMiddleware extends Middleware { export default class LocalAccountsMiddleware extends Middleware {
@ -57,6 +57,22 @@ export default class LocalAccountsMiddleware extends Middleware {
}); });
}); });
register('parity_changePassword', ([address, oldPassword, newPassword]) => {
const account = accounts.get(address);
return account
.decryptPrivateKey(oldPassword)
.then((privateKey) => {
if (!privateKey) {
return false;
}
account.changePassword(privateKey, newPassword);
return true;
});
});
register('parity_checkRequest', ([id]) => { register('parity_checkRequest', ([id]) => {
return transactions.hash(id) || Promise.resolve(null); return transactions.hash(id) || Promise.resolve(null);
}); });
@ -84,6 +100,17 @@ export default class LocalAccountsMiddleware extends Middleware {
}); });
}); });
register('parity_newAccountFromSecret', ([secret, password]) => {
return verifySecret(secret)
.then((isValid) => {
if (!isValid) {
throw new Error('Invalid secret key');
}
return accounts.create(secret, password);
});
});
register('parity_setAccountMeta', ([address, meta]) => { register('parity_setAccountMeta', ([address, meta]) => {
accounts.get(address).meta = meta; accounts.get(address).meta = meta;
@ -127,6 +154,12 @@ export default class LocalAccountsMiddleware extends Middleware {
return accounts.remove(address, password); return accounts.remove(address, password);
}); });
register('parity_testPassword', ([address, password]) => {
const account = accounts.get(address);
return account.isValidPassword(password);
});
register('signer_confirmRequest', ([id, modify, password]) => { register('signer_confirmRequest', ([id, modify, password]) => {
const { const {
gasPrice, gasPrice,
@ -137,30 +170,33 @@ export default class LocalAccountsMiddleware extends Middleware {
data data
} = Object.assign(transactions.get(id), modify); } = Object.assign(transactions.get(id), modify);
return this const account = accounts.get(from);
.rpcRequest('parity_nextNonce', [from])
.then((nonce) => {
const tx = new EthereumTx({
nonce,
to,
data,
gasLimit: inNumber16(gasLimit),
gasPrice: inNumber16(gasPrice),
value: inNumber16(value)
});
const account = accounts.get(from);
tx.sign(account.decryptPrivateKey(password)); return Promise.all([
this.rpcRequest('parity_nextNonce', [from]),
const serializedTx = `0x${tx.serialize().toString('hex')}`; account.decryptPrivateKey(password)
])
return this.rpcRequest('eth_sendRawTransaction', [serializedTx]); .then(([nonce, privateKey]) => {
}) const tx = new EthereumTx({
.then((hash) => { nonce,
transactions.confirm(id, hash); to,
data,
return {}; gasLimit: inNumber16(gasLimit),
gasPrice: inNumber16(gasPrice),
value: inNumber16(value)
}); });
tx.sign(privateKey);
const serializedTx = `0x${tx.serialize().toString('hex')}`;
return this.rpcRequest('eth_sendRawTransaction', [serializedTx]);
})
.then((hash) => {
transactions.confirm(id, hash);
return {};
});
}); });
register('signer_rejectRequest', ([id]) => { register('signer_rejectRequest', ([id]) => {


@ -80,12 +80,16 @@ export default class JsonRpcBase extends EventEmitter {
const res = middleware.handle(method, params); const res = middleware.handle(method, params);
if (res != null) { if (res != null) {
const result = this._wrapSuccessResult(res); // If `res` isn't a promise, we need to wrap it
const json = this.encode(method, params); return Promise.resolve(res)
.then((res) => {
const result = this._wrapSuccessResult(res);
const json = this.encode(method, params);
Logging.send(method, params, { json, result }); Logging.send(method, params, { json, result });
return res; return res;
});
} }
} }


@ -17,7 +17,7 @@
import { range } from 'lodash'; import { range } from 'lodash';
export function bytesToHex (bytes) { export function bytesToHex (bytes) {
return '0x' + bytes.map((b) => ('0' + b.toString(16)).slice(-2)).join(''); return '0x' + Buffer.from(bytes).toString('hex');
} }
export function cleanupValue (value, type) { export function cleanupValue (value, type) {


@ -23,7 +23,7 @@ const parityNode = (
process.env.PARITY_URL && `http://${process.env.PARITY_URL}` process.env.PARITY_URL && `http://${process.env.PARITY_URL}`
) || ( ) || (
process.env.NODE_ENV === 'production' process.env.NODE_ENV === 'production'
? 'http://127.0.0.1:8080' ? 'http://127.0.0.1:8545'
: '' : ''
); );


@ -23,6 +23,7 @@ import { RadioButton, RadioButtonGroup } from 'material-ui/RadioButton';
import { Form, Input, IdentityIcon } from '~/ui'; import { Form, Input, IdentityIcon } from '~/ui';
import PasswordStrength from '~/ui/Form/PasswordStrength'; import PasswordStrength from '~/ui/Form/PasswordStrength';
import { RefreshIcon } from '~/ui/Icons'; import { RefreshIcon } from '~/ui/Icons';
import Loading from '~/ui/Loading';
import ChangeVault from '../ChangeVault'; import ChangeVault from '../ChangeVault';
import styles from '../createAccount.css'; import styles from '../createAccount.css';
@ -170,7 +171,9 @@ export default class CreateAccount extends Component {
const { accounts } = this.state; const { accounts } = this.state;
if (!accounts) { if (!accounts) {
return null; return (
<Loading className={ styles.selector } size={ 1 } />
);
} }
const identities = Object const identities = Object
@ -205,6 +208,14 @@ export default class CreateAccount extends Component {
createIdentities = () => { createIdentities = () => {
const { createStore } = this.props; const { createStore } = this.props;
this.setState({
accounts: null,
selectedAddress: ''
});
createStore.setAddress('');
createStore.setPhrase('');
return createStore return createStore
.createIdentities() .createIdentities()
.then((accounts) => { .then((accounts) => {


@ -58,12 +58,12 @@ describe('modals/CreateAccount/NewAccount', () => {
return instance.componentWillMount(); return instance.componentWillMount();
}); });
it('creates initial accounts', () => { it('resets the accounts', () => {
expect(Object.keys(instance.state.accounts).length).to.equal(7); expect(instance.state.accounts).to.be.null;
}); });
it('sets the initial selected value', () => { it('resets the initial selected value', () => {
expect(instance.state.selectedAddress).to.equal(Object.keys(instance.state.accounts)[0]); expect(instance.state.selectedAddress).to.equal('');
}); });
}); });
}); });


@ -69,7 +69,7 @@ export default class Store {
return !(this.nameError || this.walletFileError); return !(this.nameError || this.walletFileError);
case 'fromNew': case 'fromNew':
return !(this.nameError || this.passwordRepeatError); return !(this.nameError || this.passwordRepeatError) && this.hasAddress;
case 'fromPhrase': case 'fromPhrase':
return !(this.nameError || this.passwordRepeatError); return !(this.nameError || this.passwordRepeatError);
@ -85,6 +85,10 @@ export default class Store {
} }
} }
@computed get hasAddress () {
return !!(this.address);
}
@computed get passwordRepeatError () { @computed get passwordRepeatError () {
return this.password === this.passwordRepeat return this.password === this.passwordRepeat
? null ? null


@ -329,6 +329,7 @@ describe('modals/CreateAccount/Store', () => {
describe('createType === fromNew', () => { describe('createType === fromNew', () => {
beforeEach(() => { beforeEach(() => {
store.setCreateType('fromNew'); store.setCreateType('fromNew');
store.setAddress('0x0000000000000000000000000000000000000000');
}); });
it('returns true on no errors', () => { it('returns true on no errors', () => {
@ -337,11 +338,13 @@ describe('modals/CreateAccount/Store', () => {
it('returns false on nameError', () => { it('returns false on nameError', () => {
store.setName(''); store.setName('');
expect(store.canCreate).to.be.false; expect(store.canCreate).to.be.false;
}); });
it('returns false on passwordRepeatError', () => { it('returns false on passwordRepeatError', () => {
store.setPassword('testing'); store.setPassword('testing');
expect(store.canCreate).to.be.false; expect(store.canCreate).to.be.false;
}); });
}); });


@ -28,7 +28,7 @@ export default class SecureApi extends Api {
_tokens = []; _tokens = [];
_dappsInterface = null; _dappsInterface = null;
_dappsPort = 8080; _dappsPort = 8545;
_signerPort = 8180; _signerPort = 8180;
static getTransport (url, sysuiToken) { static getTransport (url, sysuiToken) {


@ -171,13 +171,13 @@ function addProxies (app) {
})); }));
app.use('/api', proxy({ app.use('/api', proxy({
target: 'http://127.0.0.1:8080', target: 'http://127.0.0.1:8545',
changeOrigin: true, changeOrigin: true,
autoRewrite: true autoRewrite: true
})); }));
app.use('/app', proxy({ app.use('/app', proxy({
target: 'http://127.0.0.1:8080', target: 'http://127.0.0.1:8545',
changeOrigin: true, changeOrigin: true,
pathRewrite: { pathRewrite: {
'^/app': '' '^/app': ''
@ -193,7 +193,7 @@ function addProxies (app) {
})); }));
app.use('/rpc', proxy({ app.use('/rpc', proxy({
target: 'http://127.0.0.1:8080', target: 'http://127.0.0.1:8545',
changeOrigin: true changeOrigin: true
})); }));
} }


@ -39,7 +39,7 @@ warp = true
allow_ips = "all" allow_ips = "all"
snapshot_peers = 0 snapshot_peers = 0
max_pending_peers = 64 max_pending_peers = 64
serve_light = true no_serve_light = false
reserved_only = false reserved_only = false
reserved_peers = "./path_to_file" reserved_peers = "./path_to_file"


@ -94,6 +94,7 @@ usage! {
flag_chain: String = "foundation", or |c: &Config| otry!(c.parity).chain.clone(), flag_chain: String = "foundation", or |c: &Config| otry!(c.parity).chain.clone(),
flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(),
flag_light: bool = false, or |c: &Config| otry!(c.parity).light,
// -- Account Options // -- Account Options
flag_unlock: Option<String> = None, flag_unlock: Option<String> = None,
@ -149,6 +150,8 @@ usage! {
flag_reserved_only: bool = false, flag_reserved_only: bool = false,
or |c: &Config| otry!(c.network).reserved_only.clone(), or |c: &Config| otry!(c.network).reserved_only.clone(),
flag_no_ancient_blocks: bool = false, or |_| None, flag_no_ancient_blocks: bool = false, or |_| None,
flag_no_serve_light: bool = false,
or |c: &Config| otry!(c.network).no_serve_light.clone(),
// -- API and Console Options // -- API and Console Options
// RPC // RPC
@ -379,6 +382,7 @@ struct Operating {
db_path: Option<String>, db_path: Option<String>,
keys_path: Option<String>, keys_path: Option<String>,
identity: Option<String>, identity: Option<String>,
light: Option<bool>,
} }
#[derive(Default, Debug, PartialEq, RustcDecodable)] #[derive(Default, Debug, PartialEq, RustcDecodable)]
@ -414,6 +418,7 @@ struct Network {
node_key: Option<String>, node_key: Option<String>,
reserved_peers: Option<String>, reserved_peers: Option<String>,
reserved_only: Option<bool>, reserved_only: Option<bool>,
no_serve_light: Option<bool>,
} }
#[derive(Default, Debug, PartialEq, RustcDecodable)] #[derive(Default, Debug, PartialEq, RustcDecodable)]
@ -639,6 +644,7 @@ mod tests {
flag_db_path: Some("$HOME/.parity/chains".into()), flag_db_path: Some("$HOME/.parity/chains".into()),
flag_keys_path: "$HOME/.parity/keys".into(), flag_keys_path: "$HOME/.parity/keys".into(),
flag_identity: "".into(), flag_identity: "".into(),
flag_light: false,
// -- Account Options // -- Account Options
flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()),
@ -669,6 +675,7 @@ mod tests {
flag_reserved_peers: Some("./path_to_file".into()), flag_reserved_peers: Some("./path_to_file".into()),
flag_reserved_only: false, flag_reserved_only: false,
flag_no_ancient_blocks: false, flag_no_ancient_blocks: false,
flag_no_serve_light: false,
// -- API and Console Options // -- API and Console Options
// RPC // RPC
@ -844,6 +851,7 @@ mod tests {
db_path: None, db_path: None,
keys_path: None, keys_path: None,
identity: None, identity: None,
light: None,
}), }),
account: Some(Account { account: Some(Account {
unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]),
@ -873,6 +881,7 @@ mod tests {
node_key: None, node_key: None,
reserved_peers: Some("./path/to/reserved_peers".into()), reserved_peers: Some("./path/to/reserved_peers".into()),
reserved_only: Some(true), reserved_only: Some(true),
no_serve_light: None,
}), }),
rpc: Some(Rpc { rpc: Some(Rpc {
disable: Some(true), disable: Some(true),


@ -70,6 +70,11 @@ Operating Options:
--keys-path PATH Specify the path for JSON key files to be found --keys-path PATH Specify the path for JSON key files to be found
(default: {flag_keys_path}). (default: {flag_keys_path}).
--identity NAME Specify your node's name. (default: {flag_identity}) --identity NAME Specify your node's name. (default: {flag_identity})
--light Experimental: run in light client mode. Light clients
synchronize a bare minimum of data and fetch necessary
data on-demand from the network. Much lower in storage,
potentially higher in bandwidth. Has no effect with
subcommands (default: {flag_light}).
Account Options: Account Options:
--unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution.
@ -129,6 +134,7 @@ Networking Options:
--max-pending-peers NUM Allow up to NUM pending connections. (default: {flag_max_pending_peers}) --max-pending-peers NUM Allow up to NUM pending connections. (default: {flag_max_pending_peers})
--no-ancient-blocks Disable downloading old blocks after snapshot restoration --no-ancient-blocks Disable downloading old blocks after snapshot restoration
or warp sync. (default: {flag_no_ancient_blocks}) or warp sync. (default: {flag_no_ancient_blocks})
--no-serve-light Disable serving light peers. (default: {flag_no_serve_light})
API and Console Options: API and Console Options:
--no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc}) --no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc})
@ -141,8 +147,9 @@ API and Console Options:
(default: {flag_jsonrpc_cors:?}) (default: {flag_jsonrpc_cors:?})
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC --jsonrpc-apis APIS Specify the APIs available through the JSONRPC
interface. APIS is a comma-delimited list of API interface. APIS is a comma-delimited list of API
names. Possible names are web3, eth, net, personal, names. Possible names are all, safe, web3, eth, net, personal,
parity, parity_set, traces, rpc, parity_accounts. parity, parity_set, traces, rpc, parity_accounts.
You can also disable a specific API by prefixing it with '-': all,-personal
(default: {flag_jsonrpc_apis}). (default: {flag_jsonrpc_apis}).
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will --jsonrpc-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it validate the Host header sent by the browser, it

View File

@ -30,6 +30,7 @@ use ethcore::miner::{MinerOptions, Banning, StratumOptions};
use ethcore::verification::queue::VerifierSettings; use ethcore::verification::queue::VerifierSettings;
use rpc::{IpcConfiguration, HttpConfiguration}; use rpc::{IpcConfiguration, HttpConfiguration};
use rpc_apis::ApiSet;
use ethcore_rpc::NetworkSettings; use ethcore_rpc::NetworkSettings;
use cache::CacheConfig; use cache::CacheConfig;
use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_for_db, use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_for_db,
@ -382,6 +383,8 @@ impl Configuration {
check_seal: !self.args.flag_no_seal_check, check_seal: !self.args.flag_no_seal_check,
download_old_blocks: !self.args.flag_no_ancient_blocks, download_old_blocks: !self.args.flag_no_ancient_blocks,
verifier_settings: verifier_settings, verifier_settings: verifier_settings,
serve_light: !self.args.flag_no_serve_light,
light: self.args.flag_light,
}; };
Cmd::Run(run_cmd) Cmd::Run(run_cmd)
}; };
@ -716,16 +719,7 @@ impl Configuration {
.collect(); .collect();
if self.args.flag_geth { if self.args.flag_geth {
apis.push("personal"); apis.insert(0, "personal");
}
if self.args.flag_public_node {
apis.retain(|api| {
match *api {
"eth" | "net" | "parity" | "rpc" | "web3" => true,
_ => false
}
});
} }
apis.join(",") apis.join(",")
@ -786,7 +780,10 @@ impl Configuration {
enabled: self.rpc_enabled(), enabled: self.rpc_enabled(),
interface: self.rpc_interface(), interface: self.rpc_interface(),
port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port),
apis: self.rpc_apis().parse()?, apis: match self.args.flag_public_node {
false => self.rpc_apis().parse()?,
true => self.rpc_apis().parse::<ApiSet>()?.retain(ApiSet::PublicContext),
},
hosts: self.rpc_hosts(), hosts: self.rpc_hosts(),
cors: self.rpc_cors(), cors: self.rpc_cors(),
threads: match self.args.flag_jsonrpc_threads { threads: match self.args.flag_jsonrpc_threads {
@ -1201,6 +1198,8 @@ mod tests {
check_seal: true, check_seal: true,
download_old_blocks: true, download_old_blocks: true,
verifier_settings: Default::default(), verifier_settings: Default::default(),
serve_light: true,
light: false,
}; };
expected.secretstore_conf.enabled = cfg!(feature = "secretstore"); expected.secretstore_conf.enabled = cfg!(feature = "secretstore");
assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected)); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected));


@ -18,12 +18,14 @@ use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use dir::default_data_path; use dir::default_data_path;
use ethcore::client::Client; use ethcore::client::{Client, BlockChainClient, BlockId};
use ethsync::SyncProvider; use ethcore::transaction::{Transaction, Action};
use hash_fetch::fetch::Client as FetchClient; use hash_fetch::fetch::Client as FetchClient;
use hash_fetch::urlhint::ContractClient;
use helpers::replace_home; use helpers::replace_home;
use rpc_apis::SignerService; use rpc_apis::SignerService;
use parity_reactor; use parity_reactor;
use util::{Bytes, Address, U256};
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct Configuration { pub struct Configuration {
@ -43,15 +45,53 @@ impl Default for Configuration {
} }
} }
pub struct Dependencies { /// Registrar implementation of the full client.
pub struct FullRegistrar {
/// Handle to the full client.
pub client: Arc<Client>, pub client: Arc<Client>,
pub sync: Arc<SyncProvider>, }
impl ContractClient for FullRegistrar {
fn registrar(&self) -> Result<Address, String> {
self.client.additional_params().get("registrar")
.ok_or_else(|| "Registrar not defined.".into())
.and_then(|registrar| {
registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e))
})
}
fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
let from = Address::default();
let transaction = Transaction {
nonce: self.client.latest_nonce(&from),
action: Action::Call(address),
gas: U256::from(50_000_000),
gas_price: U256::default(),
value: U256::default(),
data: data,
}.fake_sign(from);
self.client.call(&transaction, BlockId::Latest, Default::default())
.map_err(|e| format!("{:?}", e))
.map(|executed| {
executed.output
})
}
}
// TODO: light client implementation forwarding to OnDemand and waiting for future
// to resolve.
pub struct Dependencies {
pub sync_status: Arc<SyncStatus>,
pub contract_client: Arc<ContractClient>,
pub remote: parity_reactor::TokioRemote,
pub fetch: FetchClient,
pub signer: Arc<SignerService>,
}

-pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<Middleware>, String> {
pub fn new(configuration: Configuration, deps: Dependencies)
  -> Result<Option<Middleware>, String>
{
if !configuration.enabled {
return Ok(None);
}
@ -63,8 +103,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<Mi
).map(Some)
}

-pub use self::server::Middleware;
-pub use self::server::dapps_middleware;
pub use self::server::{SyncStatus, Middleware, dapps_middleware};

#[cfg(not(feature = "dapps"))]
mod server {
@ -72,11 +111,12 @@ mod server {
use std::path::PathBuf;
use ethcore_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction};

pub type SyncStatus = Fn() -> bool;

pub struct Middleware;

impl RequestMiddleware for Middleware {
fn on_request(
-&self, req: &hyper::server::Request<hyper::net::HttpStream>, control: &hyper::Control
&self, _req: &hyper::server::Request<hyper::net::HttpStream>, _control: &hyper::Control
) -> RequestMiddlewareAction {
unreachable!()
}
@ -96,28 +136,20 @@ mod server {
use super::Dependencies;
use std::path::PathBuf;
use std::sync::Arc;
-use util::{Bytes, Address, U256};
-use ethcore::transaction::{Transaction, Action};
-use ethcore::client::{Client, BlockChainClient, BlockId};
-use ethcore_rpc::is_major_importing;
-use hash_fetch::urlhint::ContractClient;
use parity_dapps;
use parity_reactor;

pub use parity_dapps::Middleware;
pub use parity_dapps::SyncStatus;

pub fn dapps_middleware(
deps: Dependencies,
dapps_path: PathBuf,
extra_dapps: Vec<PathBuf>,
) -> Result<Middleware, String> {
-let sync = deps.sync;
let signer = deps.signer.clone();
-let client = deps.client;
let parity_remote = parity_reactor::Remote::new(deps.remote.clone());
-let registrar = Arc::new(Registrar { client: client.clone() });
-let sync_status = Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info()));
let web_proxy_tokens = Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token));

Ok(parity_dapps::Middleware::new(
@ -125,42 +157,10 @@ mod server {
deps.signer.address(),
dapps_path,
extra_dapps,
-registrar,
-sync_status,
deps.contract_client,
deps.sync_status,
web_proxy_tokens,
deps.fetch.clone(),
))
}

-struct Registrar {
-client: Arc<Client>,
-}

-impl ContractClient for Registrar {
-fn registrar(&self) -> Result<Address, String> {
-self.client.additional_params().get("registrar")
-.ok_or_else(|| "Registrar not defined.".into())
-.and_then(|registrar| {
-registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e))
-})
-}

-fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
-let from = Address::default();
-let transaction = Transaction {
-nonce: self.client.latest_nonce(&from),
-action: Action::Call(address),
-gas: U256::from(50_000_000),
-gas_price: U256::default(),
-value: U256::default(),
-data: data,
-}.fake_sign(from);

-self.client.call(&transaction, BlockId::Latest, Default::default())
-.map_err(|e| format!("{:?}", e))
-.map(|executed| {
-executed.output
-})
-}
-}
}
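The net effect of this restructuring is that the dapps layer now depends only on a sync-status closure and a `ContractClient` trait object rather than on the concrete full `Client`/`SyncProvider`, which is what will let a light-client implementation slot in later (per the TODO above). A toy sketch of the decoupling, with simplified stand-in types:

use std::sync::Arc;

// Simplified stand-in for hash_fetch::urlhint::ContractClient.
trait ContractClient: Send + Sync {
    fn registrar(&self) -> Result<String, String>;
}

struct FullClientRegistrar;

impl ContractClient for FullClientRegistrar {
    fn registrar(&self) -> Result<String, String> {
        // A real full client would read this from the chain spec params;
        // here we just reuse the error string from the code above.
        Err("Registrar not defined.".into())
    }
}

// The dapps layer sees only trait objects and closures, never the client.
struct Dependencies {
    sync_status: Arc<dyn Fn() -> bool + Send + Sync>,
    contract_client: Arc<dyn ContractClient>,
}

fn main() {
    let deps = Dependencies {
        // A full node closes over real sync state here; a light node could
        // substitute its own notion of "still importing".
        sync_status: Arc::new(|| false),
        contract_client: Arc::new(FullClientRegistrar),
    };
    println!("syncing: {}", (deps.sync_status)());
    println!("registrar: {:?}", deps.contract_client.registrar());
}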

View File

@ -14,13 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

-//! Adds a seventh column for node information.
//! Utilities and helpers for the light client.

-use util::migration::ChangeColumns;
mod queue_cull;

-/// The migration from v10 to v11.
-pub const TO_V11: ChangeColumns = ChangeColumns {
-pre_columns: Some(6),
-post_columns: Some(7),
-version: 11,
-};
pub use self::queue_cull::QueueCull;

View File

@ -0,0 +1,99 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Service for culling the light client's transaction queue.
use std::sync::Arc;
use std::time::Duration;
use ethcore::service::ClientIoMessage;
use ethsync::LightSync;
use io::{IoContext, IoHandler, TimerToken};
use light::client::Client;
use light::on_demand::{request, OnDemand};
use light::TransactionQueue;
use futures::{future, stream, Future, Stream};
use parity_reactor::Remote;
use util::RwLock;
// Attempt to cull once every 10 minutes.
const TOKEN: TimerToken = 1;
const TIMEOUT_MS: u64 = 1000 * 60 * 10;
// But make each attempt last only 9 minutes
const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9;
/// Periodically culls the transaction queue of mined transactions.
pub struct QueueCull {
/// A handle to the client, for getting the latest block header.
pub client: Arc<Client>,
/// A handle to the sync service.
pub sync: Arc<LightSync>,
/// The on-demand request service.
pub on_demand: Arc<OnDemand>,
/// The transaction queue.
pub txq: Arc<RwLock<TransactionQueue>>,
/// Event loop remote.
pub remote: Remote,
}
impl IoHandler<ClientIoMessage> for QueueCull {
fn initialize(&self, io: &IoContext<ClientIoMessage>) {
io.register_timer(TOKEN, TIMEOUT_MS).expect("Error registering timer");
}
fn timeout(&self, _io: &IoContext<ClientIoMessage>, timer: TimerToken) {
if timer != TOKEN { return }
let senders = self.txq.read().queued_senders();
if senders.is_empty() { return }
let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone());
let best_header = self.client.best_block_header();
let start_nonce = self.client.engine().account_start_nonce();
info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len());
self.remote.spawn_with_timeout(move || {
let maybe_fetching = sync.with_context(move |ctx| {
// fetch the nonce of each sender in the queue.
let nonce_futures = senders.iter()
.map(|&address| request::Account { header: best_header.clone(), address: address })
.map(|request| on_demand.account(ctx, request))
.map(move |fut| fut.map(move |x| x.map(|acc| acc.nonce).unwrap_or(start_nonce)))
.zip(senders.iter())
.map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce)));
// as they come in, update each sender to the new nonce.
stream::futures_unordered(nonce_futures)
.fold(txq, |txq, (address, nonce)| {
txq.write().cull(address, nonce);
future::ok(txq)
})
.map(|_| ()) // finally, discard the txq handle and log errors.
.map_err(|_| debug!(target: "cull", "OnDemand prematurely closed channel."))
});
match maybe_fetching {
Some(fut) => fut.boxed(),
None => future::ok(()).boxed(),
}
}, Duration::from_millis(PURGE_TIMEOUT_MS), || {})
}
}
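The cull rule itself is simple even though the plumbing above is asynchronous: once the network reports a sender's current nonce, every queued transaction from that sender with a lower nonce is stale and can be dropped. A synchronous sketch of that rule with simplified types (the real `TransactionQueue::cull` works on full transactions and `U256` nonces):

use std::collections::HashMap;

// Simplified model: the queue keeps, per sender, pending transactions keyed
// by nonce only.
struct Queue {
    by_sender: HashMap<&'static str, Vec<u64>>,
}

impl Queue {
    // Mirror of `cull`: anything below the reported account nonce has
    // already been mined (or can never be sent) and is removed.
    fn cull(&mut self, sender: &'static str, nonce: u64) {
        if let Some(nonces) = self.by_sender.get_mut(sender) {
            nonces.retain(|&n| n >= nonce);
        }
    }
}

fn main() {
    let mut q = Queue { by_sender: HashMap::new() };
    q.by_sender.insert("alice", vec![5, 6, 7]);
    // The on-demand fetch (async in the real service) says alice's nonce is 7:
    q.cull("alice", 7);
    assert_eq!(q.by_sender["alice"], vec![7]);
    println!("remaining nonces: {:?}", q.by_sender["alice"]);
}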

View File

@ -28,6 +28,7 @@ extern crate ctrlc;
extern crate docopt;
extern crate env_logger;
extern crate fdlimit;
extern crate futures;
extern crate isatty;
extern crate jsonrpc_core;
extern crate num_cpus;
@ -105,6 +106,7 @@ mod deprecated;
mod dir;
mod helpers;
mod informant;
mod light_helpers;
mod migration;
mod modules;
mod params;

View File

@ -30,7 +30,7 @@ use ethcore::migrations::Extract;
/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
-const CURRENT_VERSION: u32 = 11;
const CURRENT_VERSION: u32 = 12;
/// First version of the consolidated database.
const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of database at once.
@ -147,6 +147,7 @@ fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> R
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?;
Ok(manager)
}
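For context, bumping `CURRENT_VERSION` and registering `TO_V12` works because the manager runs every registered migration whose version is newer than the version stored on disk, in ascending order. A minimal model of that dispatch (names here are illustrative, not the real migration API):

struct Migration { version: u32, name: &'static str }

// Run everything newer than the on-disk version, in registration order.
fn migrations_needed(db_version: u32, all: &[Migration]) -> Vec<&'static str> {
    all.iter().filter(|m| m.version > db_version).map(|m| m.name).collect()
}

fn main() {
    let all = [
        Migration { version: 10, name: "ToV10" },
        Migration { version: 11, name: "TO_V11" },
        Migration { version: 12, name: "TO_V12" },
    ];
    // A database already at version 11 only runs the new v12 migration.
    assert_eq!(migrations_needed(11, &all), vec!["TO_V12"]);
    println!("{:?}", migrations_needed(11, &all));
}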

View File

@ -81,8 +81,8 @@ impl fmt::Display for IpcConfiguration {
}
}

-pub struct Dependencies {
-pub apis: Arc<rpc_apis::Dependencies>,
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
}
@ -112,11 +112,17 @@ impl rpc::IpcMetaExtractor<Metadata> for RpcExtractor {
}
}

-fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler<Metadata, Middleware> {
-rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis)
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
rpc_apis::setup_rpc(deps.stats.clone(), &*deps.apis, apis)
}

-pub fn new_http(conf: HttpConfiguration, deps: &Dependencies, middleware: Option<dapps::Middleware>) -> Result<Option<HttpServer>, String> {
pub fn new_http<D: rpc_apis::Dependencies>(
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>
) -> Result<Option<HttpServer>, String> {
if !conf.enabled {
return Ok(None);
}
@ -157,7 +163,10 @@ pub fn new_http(conf: HttpConfiguration, deps: &Dependencies, middleware: Option
}
}

-pub fn new_ipc(conf: IpcConfiguration, dependencies: &Dependencies) -> Result<Option<IpcServer>, String> {
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if !conf.enabled {
return Ok(None);
}

View File

@ -27,12 +27,14 @@ use ethcore::client::Client;
use ethcore::miner::{Miner, ExternalMiner};
use ethcore::snapshot::SnapshotService;
use ethcore_rpc::{Metadata, NetworkSettings};
-use ethcore_rpc::informant::{Middleware, RpcStats, ClientNotifier};
-use ethcore_rpc::dispatch::FullDispatcher;
-use ethsync::{ManageNetwork, SyncProvider};
use ethcore_rpc::informant::{ActivityNotifier, Middleware, RpcStats, ClientNotifier};
use ethcore_rpc::dispatch::{FullDispatcher, LightDispatcher};
use ethsync::{ManageNetwork, SyncProvider, LightSync};
use hash_fetch::fetch::Client as FetchClient;
use jsonrpc_core::{MetaIoHandler};
use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache};
use updater::Updater;
use util::{Mutex, RwLock};
use ethcore_logger::RotatingLogger;

#[derive(Debug, PartialEq, Clone, Eq, Hash)]
@ -83,9 +85,17 @@ impl FromStr for Api {
#[derive(Debug, Clone)]
pub enum ApiSet {
// Safe context (like token-protected WS interface)
SafeContext,
// Unsafe context (like jsonrpc over http)
UnsafeContext,
// Public context (like public jsonrpc over http)
PublicContext,
// All possible APIs
All,
// Local "unsafe" context and accounts access
IpcContext,
// Fixed list of APIs
List(HashSet<Api>),
}
@ -105,30 +115,31 @@ impl FromStr for ApiSet {
type Err = String;

fn from_str(s: &str) -> Result<Self, Self::Err> {
-s.split(',')
-.map(Api::from_str)
-.collect::<Result<_, _>>()
-.map(ApiSet::List)
let mut apis = HashSet::new();

for api in s.split(',') {
match api {
"all" => {
apis.extend(ApiSet::All.list_apis());
},
"safe" => {
// Safe APIs are those that are safe even in UnsafeContext.
apis.extend(ApiSet::UnsafeContext.list_apis());
},
// Remove the API
api if api.starts_with("-") => {
let api = api[1..].parse()?;
apis.remove(&api);
},
api => {
let api = api.parse()?;
apis.insert(api);
},
}
}

Ok(ApiSet::List(apis))
}
}

-pub struct Dependencies {
-pub signer_service: Arc<SignerService>,
-pub client: Arc<Client>,
-pub snapshot: Arc<SnapshotService>,
-pub sync: Arc<SyncProvider>,
-pub net: Arc<ManageNetwork>,
-pub secret_store: Option<Arc<AccountProvider>>,
-pub miner: Arc<Miner>,
-pub external_miner: Arc<ExternalMiner>,
-pub logger: Arc<RotatingLogger>,
-pub settings: Arc<NetworkSettings>,
-pub net_service: Arc<ManageNetwork>,
-pub updater: Arc<Updater>,
-pub geth_compatibility: bool,
-pub dapps_interface: Option<String>,
-pub dapps_port: Option<u16>,
-pub fetch: FetchClient,
-}

fn to_modules(apis: &[Api]) -> BTreeMap<String, String> {
@ -151,131 +162,321 @@ fn to_modules(apis: &[Api]) -> BTreeMap<String, String> {
modules
}
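The new parser is order-sensitive: later tokens override earlier ones, which is what makes `-api` subtraction composable with the `all` and `safe` groups. A self-contained sketch of the intended semantics (error handling and the "safe" group elided; not the real types):

use std::collections::HashSet;

// Minimal model of the fold over comma-separated tokens.
fn parse<'a>(s: &'a str, all: &[&'a str]) -> HashSet<&'a str> {
    let mut apis = HashSet::new();
    for token in s.split(',') {
        match token {
            "all" => apis.extend(all.iter().cloned()),
            t if t.starts_with('-') => { apis.remove(&t[1..]); }
            t => { apis.insert(t); }
        }
    }
    apis
}

fn main() {
    let all = ["web3", "eth", "personal", "signer"];
    // Later tokens win: "all" re-adds personal, "-personal" strips it again,
    // matching the test_all_without_personal_apis case further down.
    let set = parse("personal,all,-personal", &all);
    assert!(!set.contains("personal") && set.contains("eth"));
    println!("{:?}", set);
}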
/// RPC dependencies can be used to initialize RPC endpoints from APIs.
pub trait Dependencies {
type Notifier: ActivityNotifier;
/// Create the activity notifier.
fn activity_notifier(&self) -> Self::Notifier;
/// Extend the given I/O handler with endpoints for each API.
fn extend_with_set(&self, handler: &mut MetaIoHandler<Metadata, Middleware<Self::Notifier>>, apis: &[Api]);
}
/// RPC dependencies for a full node.
pub struct FullDependencies {
pub signer_service: Arc<SignerService>,
pub client: Arc<Client>,
pub snapshot: Arc<SnapshotService>,
pub sync: Arc<SyncProvider>,
pub net: Arc<ManageNetwork>,
pub secret_store: Option<Arc<AccountProvider>>,
pub miner: Arc<Miner>,
pub external_miner: Arc<ExternalMiner>,
pub logger: Arc<RotatingLogger>,
pub settings: Arc<NetworkSettings>,
pub net_service: Arc<ManageNetwork>,
pub updater: Arc<Updater>,
pub geth_compatibility: bool,
pub dapps_interface: Option<String>,
pub dapps_port: Option<u16>,
pub fetch: FetchClient,
}
impl Dependencies for FullDependencies {
type Notifier = ClientNotifier;
fn activity_notifier(&self) -> ClientNotifier {
ClientNotifier {
client: self.client.clone(),
}
}
fn extend_with_set(&self, handler: &mut MetaIoHandler<Metadata, Middleware>, apis: &[Api]) {
use ethcore_rpc::v1::*;
macro_rules! add_signing_methods {
($namespace:ident, $handler:expr, $deps:expr) => {
{
let deps = &$deps;
let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner));
if deps.signer_service.is_enabled() {
$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store)))
} else {
$handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher)))
}
}
}
}
let dispatcher = FullDispatcher::new(Arc::downgrade(&self.client), Arc::downgrade(&self.miner));
for api in apis {
match *api {
Api::Web3 => {
handler.extend_with(Web3Client::new().to_delegate());
},
Api::Net => {
handler.extend_with(NetClient::new(&self.sync).to_delegate());
},
Api::Eth => {
let client = EthClient::new(
&self.client,
&self.snapshot,
&self.sync,
&self.secret_store,
&self.miner,
&self.external_miner,
EthClientOptions {
pending_nonce_from_queue: self.geth_compatibility,
allow_pending_receipt_query: !self.geth_compatibility,
send_block_number_in_get_work: !self.geth_compatibility,
}
);
handler.extend_with(client.to_delegate());
let filter_client = EthFilterClient::new(&self.client, &self.miner);
handler.extend_with(filter_client.to_delegate());
add_signing_methods!(EthSigning, handler, self);
},
Api::Personal => {
handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
},
Api::Signer => {
handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service).to_delegate());
},
Api::Parity => {
let signer = match self.signer_service.is_enabled() {
true => Some(self.signer_service.clone()),
false => None,
};
handler.extend_with(ParityClient::new(
&self.client,
&self.miner,
&self.sync,
&self.updater,
&self.net_service,
&self.secret_store,
self.logger.clone(),
self.settings.clone(),
signer,
self.dapps_interface.clone(),
self.dapps_port,
).to_delegate());
add_signing_methods!(EthSigning, handler, self);
add_signing_methods!(ParitySigning, handler, self);
},
Api::ParityAccounts => {
handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate());
},
Api::ParitySet => {
handler.extend_with(ParitySetClient::new(
&self.client,
&self.miner,
&self.updater,
&self.net_service,
self.fetch.clone(),
).to_delegate())
},
Api::Traces => {
handler.extend_with(TracesClient::new(&self.client, &self.miner).to_delegate())
},
Api::Rpc => {
let modules = to_modules(&apis);
handler.extend_with(RpcClient::new(modules).to_delegate());
}
}
}
}
}
/// Light client notifier. Doesn't do anything yet, but might in the future.
pub struct LightClientNotifier;
impl ActivityNotifier for LightClientNotifier {
fn active(&self) {}
}
/// RPC dependencies for a light client.
pub struct LightDependencies {
pub signer_service: Arc<SignerService>,
pub client: Arc<::light::client::Client>,
pub sync: Arc<LightSync>,
pub net: Arc<ManageNetwork>,
pub secret_store: Arc<AccountProvider>,
pub logger: Arc<RotatingLogger>,
pub settings: Arc<NetworkSettings>,
pub on_demand: Arc<::light::on_demand::OnDemand>,
pub cache: Arc<Mutex<LightDataCache>>,
pub transaction_queue: Arc<RwLock<LightTransactionQueue>>,
pub dapps_interface: Option<String>,
pub dapps_port: Option<u16>,
pub fetch: FetchClient,
pub geth_compatibility: bool,
}
impl Dependencies for LightDependencies {
type Notifier = LightClientNotifier;
fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier }
fn extend_with_set(&self, handler: &mut MetaIoHandler<Metadata, Middleware<Self::Notifier>>, apis: &[Api]) {
use ethcore_rpc::v1::*;
let dispatcher = LightDispatcher::new(
self.sync.clone(),
self.client.clone(),
self.on_demand.clone(),
self.cache.clone(),
self.transaction_queue.clone(),
);
macro_rules! add_signing_methods {
($namespace:ident, $handler:expr, $deps:expr) => {
{
let deps = &$deps;
let dispatcher = dispatcher.clone();
let secret_store = Some(deps.secret_store.clone());
if deps.signer_service.is_enabled() {
$handler.extend_with($namespace::to_delegate(
SigningQueueClient::new(&deps.signer_service, dispatcher, &secret_store)
))
} else {
$handler.extend_with(
$namespace::to_delegate(SigningUnsafeClient::new(&secret_store, dispatcher))
)
}
}
}
}
for api in apis {
match *api {
Api::Web3 => {
handler.extend_with(Web3Client::new().to_delegate());
},
Api::Net => {
handler.extend_with(light::NetClient::new(self.sync.clone()).to_delegate());
},
Api::Eth => {
let client = light::EthClient::new(
self.sync.clone(),
self.client.clone(),
self.on_demand.clone(),
self.transaction_queue.clone(),
self.secret_store.clone(),
self.cache.clone(),
);
handler.extend_with(client.to_delegate());
// TODO: filters.
add_signing_methods!(EthSigning, handler, self);
},
Api::Personal => {
let secret_store = Some(self.secret_store.clone());
handler.extend_with(PersonalClient::new(&secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
},
Api::Signer => {
let secret_store = Some(self.secret_store.clone());
handler.extend_with(SignerClient::new(&secret_store, dispatcher.clone(), &self.signer_service).to_delegate());
},
Api::Parity => {
let signer = match self.signer_service.is_enabled() {
true => Some(self.signer_service.clone()),
false => None,
};
handler.extend_with(light::ParityClient::new(
Arc::new(dispatcher.clone()),
self.secret_store.clone(),
self.logger.clone(),
self.settings.clone(),
signer,
self.dapps_interface.clone(),
self.dapps_port,
).to_delegate());
add_signing_methods!(EthSigning, handler, self);
add_signing_methods!(ParitySigning, handler, self);
},
Api::ParityAccounts => {
let secret_store = Some(self.secret_store.clone());
handler.extend_with(ParityAccountsClient::new(&secret_store).to_delegate());
},
Api::ParitySet => {
handler.extend_with(light::ParitySetClient::new(
self.sync.clone(),
self.fetch.clone(),
).to_delegate())
},
Api::Traces => {
handler.extend_with(light::TracesClient.to_delegate())
},
Api::Rpc => {
let modules = to_modules(&apis);
handler.extend_with(RpcClient::new(modules).to_delegate());
}
}
}
}
}
impl ApiSet {
/// Retains only APIs in given set.
pub fn retain(self, set: Self) -> Self {
ApiSet::List(&self.list_apis() & &set.list_apis())
}

pub fn list_apis(&self) -> HashSet<Api> {
-let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc]
-.into_iter().collect();
let mut public_list = vec![
Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Rpc,
].into_iter().collect();
match *self {
ApiSet::List(ref apis) => apis.clone(),
-ApiSet::UnsafeContext => safe_list,
ApiSet::PublicContext => public_list,
ApiSet::UnsafeContext => {
public_list.insert(Api::Traces);
public_list
},
ApiSet::IpcContext => {
-safe_list.insert(Api::ParityAccounts);
-safe_list
public_list.insert(Api::Traces);
public_list.insert(Api::ParityAccounts);
public_list
},
ApiSet::SafeContext => {
-safe_list.insert(Api::ParityAccounts);
-safe_list.insert(Api::ParitySet);
-safe_list.insert(Api::Signer);
-safe_list
public_list.insert(Api::Traces);
public_list.insert(Api::ParityAccounts);
public_list.insert(Api::ParitySet);
public_list.insert(Api::Signer);
public_list
},
ApiSet::All => {
public_list.insert(Api::Traces);
public_list.insert(Api::ParityAccounts);
public_list.insert(Api::ParitySet);
public_list.insert(Api::Signer);
public_list.insert(Api::Personal);
public_list
},
}
}
}
-macro_rules! add_signing_methods {
-($namespace:ident, $handler:expr, $deps:expr) => {
-{
-let handler = &mut $handler;
-let deps = &$deps;
-let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner));
-if deps.signer_service.is_enabled() {
-handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store)))
-} else {
-handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher)))
-}
-}
-}
-}

-pub fn setup_rpc(stats: Arc<RpcStats>, deps: Arc<Dependencies>, apis: ApiSet) -> MetaIoHandler<Metadata, Middleware> {
-use ethcore_rpc::v1::*;
-let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, ClientNotifier {
-client: deps.client.clone(),
-}));
pub fn setup_rpc<D: Dependencies>(stats: Arc<RpcStats>, deps: &D, apis: ApiSet) -> MetaIoHandler<Metadata, Middleware<D::Notifier>> {
let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, deps.activity_notifier()));
// it's turned into a vector, because one of the cases requires &[]
let apis = apis.list_apis().into_iter().collect::<Vec<_>>();
-let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner));

-for api in &apis {
-match *api {
-Api::Web3 => {
-handler.extend_with(Web3Client::new().to_delegate());
-},
-Api::Net => {
-handler.extend_with(NetClient::new(&deps.sync).to_delegate());
-},
-Api::Eth => {
-let client = EthClient::new(
-&deps.client,
-&deps.snapshot,
-&deps.sync,
-&deps.secret_store,
-&deps.miner,
-&deps.external_miner,
-EthClientOptions {
-pending_nonce_from_queue: deps.geth_compatibility,
-allow_pending_receipt_query: !deps.geth_compatibility,
-send_block_number_in_get_work: !deps.geth_compatibility,
-}
-);
-handler.extend_with(client.to_delegate());
-let filter_client = EthFilterClient::new(&deps.client, &deps.miner);
-handler.extend_with(filter_client.to_delegate());
-add_signing_methods!(EthSigning, handler, deps);
-},
-Api::Personal => {
-handler.extend_with(PersonalClient::new(&deps.secret_store, dispatcher.clone(), deps.geth_compatibility).to_delegate());
-},
-Api::Signer => {
-handler.extend_with(SignerClient::new(&deps.secret_store, dispatcher.clone(), &deps.signer_service).to_delegate());
-},
-Api::Parity => {
-let signer = match deps.signer_service.is_enabled() {
-true => Some(deps.signer_service.clone()),
-false => None,
-};
-handler.extend_with(ParityClient::new(
-&deps.client,
-&deps.miner,
-&deps.sync,
-&deps.updater,
-&deps.net_service,
-&deps.secret_store,
-deps.logger.clone(),
-deps.settings.clone(),
-signer,
-deps.dapps_interface.clone(),
-deps.dapps_port,
-).to_delegate());
-add_signing_methods!(EthSigning, handler, deps);
-add_signing_methods!(ParitySigning, handler, deps);
-},
-Api::ParityAccounts => {
-handler.extend_with(ParityAccountsClient::new(&deps.secret_store).to_delegate());
-},
-Api::ParitySet => {
-handler.extend_with(ParitySetClient::new(
-&deps.client,
-&deps.miner,
-&deps.updater,
-&deps.net_service,
-deps.fetch.clone(),
-).to_delegate())
-},
-Api::Traces => {
-handler.extend_with(TracesClient::new(&deps.client, &deps.miner).to_delegate())
-},
-Api::Rpc => {
-let modules = to_modules(&apis);
-handler.extend_with(RpcClient::new(modules).to_delegate());
-}
-}
-}
deps.extend_with_set(&mut handler, &apis[..]);

handler
}
@ -340,4 +541,30 @@ mod test {
].into_iter().collect();
assert_eq!(ApiSet::SafeContext.list_apis(), expected);
}
#[test]
fn test_all_apis() {
assert_eq!("all".parse::<ApiSet>().unwrap(), ApiSet::List(vec![
Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc,
Api::ParityAccounts,
Api::ParitySet, Api::Signer,
Api::Personal
].into_iter().collect()));
}
#[test]
fn test_all_without_personal_apis() {
assert_eq!("personal,all,-personal".parse::<ApiSet>().unwrap(), ApiSet::List(vec![
Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc,
Api::ParityAccounts,
Api::ParitySet, Api::Signer,
].into_iter().collect()));
}
#[test]
fn test_safe_parsing() {
assert_eq!("safe".parse::<ApiSet>().unwrap(), ApiSet::List(vec![
Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc,
].into_iter().collect()));
}
}
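Splitting the old monolithic struct into a `Dependencies` trait with an associated `Notifier` type is what lets the single generic `setup_rpc` above serve both full and light nodes. A compact toy model of the pattern (all types here are simplified stand-ins, not the real parity-rpc ones):

// The trait carries everything setup needs: a notifier factory and a hook
// that knows how to register each API's handlers.
trait ActivityNotifier { fn active(&self); }

struct ClientNotifier;
impl ActivityNotifier for ClientNotifier {
    fn active(&self) { println!("full client keep-alive tick"); }
}

trait Dependencies {
    type Notifier: ActivityNotifier;
    fn activity_notifier(&self) -> Self::Notifier;
    fn extend_with_set(&self, handler: &mut Vec<String>, apis: &[&str]);
}

struct FullDeps;
impl Dependencies for FullDeps {
    type Notifier = ClientNotifier;
    fn activity_notifier(&self) -> ClientNotifier { ClientNotifier }
    fn extend_with_set(&self, handler: &mut Vec<String>, apis: &[&str]) {
        // A LightDeps impl would register light-client handlers instead.
        handler.extend(apis.iter().map(|a| format!("full/{}", a)));
    }
}

fn setup_rpc<D: Dependencies>(deps: &D, apis: &[&str]) -> Vec<String> {
    deps.activity_notifier().active();
    let mut handler = Vec::new();
    deps.extend_with_set(&mut handler, apis);
    handler
}

fn main() {
    println!("{:?}", setup_rpc(&FullDeps, &["eth", "net"]));
}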

View File

@ -30,6 +30,7 @@ use ethcore::account_provider::{AccountProvider, AccountProviderSettings};
use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions};
use ethcore::snapshot;
use ethcore::verification::queue::VerifierSettings;
use light::Cache as LightDataCache;
use ethsync::SyncConfig;
use informant::Informant;
use updater::{UpdatePolicy, Updater};
@ -60,6 +61,10 @@ const SNAPSHOT_PERIOD: u64 = 10000;
// how many blocks to wait before starting a periodic snapshot.
const SNAPSHOT_HISTORY: u64 = 100;

// Number of minutes before a given gas price corpus should expire.
// Light client only.
const GAS_CORPUS_EXPIRATION_MINUTES: i64 = 60 * 6;

// Pops along with error messages when a password is missing or invalid.
const VERIFY_PASSWORD_HINT: &'static str = "Make sure valid password is present in files passed using `--password` or in the configuration file.";
@ -107,6 +112,8 @@ pub struct RunCmd {
pub check_seal: bool,
pub download_old_blocks: bool,
pub verifier_settings: VerifierSettings,
pub serve_light: bool,
pub light: bool,
}

pub fn open_ui(signer_conf: &signer::Configuration) -> Result<(), String> {
@ -148,6 +155,171 @@ impl ::local_store::NodeInfo for FullNodeInfo {
}
}
// helper for light execution.
fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> Result<(bool, Option<String>), String> {
use light::client as light_client;
use ethsync::{LightSyncParams, LightSync, ManageNetwork};
use util::RwLock;
let panic_handler = PanicHandler::new_in_arc();
// load spec
let spec = cmd.spec.spec()?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
// load user defaults
let user_defaults = UserDefaults::load(&user_defaults_path)?;
// select pruning algorithm
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path());
// execute upgrades
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?;
// create dirs used by parity
cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled, cmd.secretstore_conf.enabled)?;
info!("Starting {}", Colour::White.bold().paint(version()));
info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client"));
// start client and create transaction queue.
let mut config = light_client::Config {
queue: Default::default(),
chain_column: ::ethcore::db::COL_LIGHT_CHAIN,
db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
db_compaction: compaction,
db_wal: cmd.wal,
};
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
config.queue.verifier_settings = cmd.verifier_settings;
let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm))
.map_err(|e| format!("Error starting light client: {}", e))?;
let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default()));
let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone());
// start network.
// set up bootnodes
let mut net_conf = cmd.net_conf;
if !cmd.custom_bootnodes {
net_conf.boot_nodes = spec.nodes.clone();
}
// TODO: configurable cache size.
let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES));
let cache = Arc::new(::util::Mutex::new(cache));
// start on_demand service.
let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));
// set network path.
net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());
let sync_params = LightSyncParams {
network_config: net_conf.into_basic().map_err(|e| format!("Failed to produce network config: {}", e))?,
client: Arc::new(provider),
network_id: cmd.network_id.unwrap_or(spec.network_id()),
subprotocol_name: ::ethsync::LIGHT_PROTOCOL,
handlers: vec![on_demand.clone()],
};
let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?;
let light_sync = Arc::new(light_sync);
// spin up event loop
let event_loop = EventLoop::spawn();
// queue cull service.
let queue_cull = Arc::new(::light_helpers::QueueCull {
client: service.client().clone(),
sync: light_sync.clone(),
on_demand: on_demand.clone(),
txq: txq.clone(),
remote: event_loop.remote(),
});
service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?;
// start the network.
light_sync.start_network();
// fetch service
let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?;
let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;
// prepare account provider
let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?);
let rpc_stats = Arc::new(informant::RpcStats::default());
let signer_path = cmd.signer_conf.signer_path.clone();
// start RPCs
let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies {
signer_service: Arc::new(rpc_apis::SignerService::new(move || {
signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e))
}, cmd.ui_address)),
client: service.client().clone(),
sync: light_sync.clone(),
net: light_sync.clone(),
secret_store: account_provider,
logger: logger,
settings: Arc::new(cmd.net_settings),
on_demand: on_demand,
cache: cache,
transaction_queue: txq,
dapps_interface: match cmd.dapps_conf.enabled {
true => Some(cmd.http_conf.interface.clone()),
false => None,
},
dapps_port: match cmd.dapps_conf.enabled {
true => Some(cmd.http_conf.port),
false => None,
},
fetch: fetch,
geth_compatibility: cmd.geth_compatibility,
});
let dependencies = rpc::Dependencies {
apis: deps_for_rpc_apis.clone(),
remote: event_loop.raw_remote(),
stats: rpc_stats.clone(),
};
// start rpc servers
let _http_server = rpc::new_http(cmd.http_conf, &dependencies, None)?;
let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;
// the signer server
let signer_deps = signer::Dependencies {
apis: deps_for_rpc_apis.clone(),
remote: event_loop.raw_remote(),
rpc_stats: rpc_stats.clone(),
};
let signing_queue = deps_for_rpc_apis.signer_service.queue();
let _signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?;
// TODO: Dapps
// minimal informant thread. Just prints block number every 5 seconds.
// TODO: integrate with informant.rs
let informant_client = service.client().clone();
::std::thread::spawn(move || loop {
info!("#{}", informant_client.best_block_header().number());
::std::thread::sleep(::std::time::Duration::from_secs(5));
});
// wait for ctrl-c.
Ok(wait_for_exit(panic_handler, None, None, can_restart))
}
pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> Result<(bool, Option<String>), String> {
if cmd.ui && cmd.dapps_conf.enabled {
// Check if Parity is already running
@ -157,12 +329,17 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
}
}

-// set up panic handler
-let panic_handler = PanicHandler::new_in_arc();

// increase max number of open files
raise_fd_limit();

// run as light client.
if cmd.light {
return execute_light(cmd, can_restart, logger);
}

// set up panic handler
let panic_handler = PanicHandler::new_in_arc();

// load spec
let spec = cmd.spec.spec()?;
@ -244,6 +421,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
sync_config.fork_block = spec.fork_block();
sync_config.warp_sync = cmd.warp_sync;
sync_config.download_old_blocks = cmd.download_old_blocks;
sync_config.serve_light = cmd.serve_light;

let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;
@ -407,7 +585,8 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
true => None,
false => Some(account_provider.clone())
};

-let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies {
let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies {
signer_service: Arc::new(rpc_apis::SignerService::new(move || {
signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e))
}, cmd.ui_address)),
@ -440,13 +619,18 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
stats: rpc_stats.clone(),
};

-// the dapps middleware
-let dapps_deps = dapps::Dependencies {
-client: client.clone(),
-sync: sync_provider.clone(),
-remote: event_loop.raw_remote(),
-fetch: fetch.clone(),
-signer: deps_for_rpc_apis.signer_service.clone(),
// the dapps server
let dapps_deps = {
let (sync, client) = (sync_provider.clone(), client.clone());
let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() });

dapps::Dependencies {
sync_status: Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())),
contract_client: contract_client,
remote: event_loop.raw_remote(),
fetch: fetch.clone(),
signer: deps_for_rpc_apis.signer_service.clone(),
}
};
let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?;
@ -460,7 +644,8 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
remote: event_loop.raw_remote(),
rpc_stats: rpc_stats.clone(),
};
-let signer_server = signer::start(cmd.signer_conf.clone(), signer_deps)?;
let signing_queue = deps_for_rpc_apis.signer_service.queue();
let signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?;

// secret store key server
let secretstore_deps = secretstore::Dependencies {

View File

@ -23,7 +23,7 @@ pub use ethcore_signer::Server as SignerServer;
use ansi_term::Colour;
use dir::default_data_path;
use ethcore_rpc::informant::RpcStats;
-use ethcore_rpc;
use ethcore_rpc::{self, ConfirmationsQueue};
use ethcore_signer as signer;
use helpers::replace_home;
use parity_reactor::TokioRemote;
@ -55,8 +55,8 @@ impl Default for Configuration {
}
}

-pub struct Dependencies {
-pub apis: Arc<rpc_apis::Dependencies>,
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub rpc_stats: Arc<RpcStats>,
}
@ -77,11 +77,15 @@ impl signer::MetaExtractor<ethcore_rpc::Metadata> for StandardExtractor {
}
}

-pub fn start(conf: Configuration, deps: Dependencies) -> Result<Option<SignerServer>, String> {
pub fn start<D: rpc_apis::Dependencies>(
conf: Configuration,
queue: Arc<ConfirmationsQueue>,
deps: Dependencies<D>,
) -> Result<Option<SignerServer>, String> {
if !conf.enabled {
Ok(None)
} else {
-Ok(Some(do_start(conf, deps)?))
Ok(Some(do_start(conf, queue, deps)?))
}
}
@ -125,14 +129,18 @@ pub fn generate_new_token(path: String) -> io::Result<String> {
Ok(code)
}

-fn do_start(conf: Configuration, deps: Dependencies) -> Result<SignerServer, String> {
fn do_start<D: rpc_apis::Dependencies>(
conf: Configuration,
queue: Arc<ConfirmationsQueue>,
deps: Dependencies<D>
) -> Result<SignerServer, String> {
let addr = format!("{}:{}", conf.interface, conf.port)
.parse()
.map_err(|_| format!("Invalid port specified: {}", conf.port))?;

let start_result = {
let server = signer::ServerBuilder::new(
-deps.apis.signer_service.queue(),
queue,
codes_path(conf.signer_path),
);
if conf.skip_origin_validation {
@ -141,7 +149,7 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result<SignerServer, Str
}
let server = server.skip_origin_validation(conf.skip_origin_validation);
let server = server.stats(deps.rpc_stats.clone());
-let handler = rpc_apis::setup_rpc(deps.rpc_stats, deps.apis, rpc_apis::ApiSet::SafeContext);
let handler = rpc_apis::setup_rpc(deps.rpc_stats, &*deps.apis, rpc_apis::ApiSet::SafeContext);
let remote = deps.remote.clone();
server.start_with_extractor(addr, handler, remote, StandardExtractor)
};

View File

@ -207,7 +207,6 @@ pub fn fetch_gas_price_corpus(
}
}

/// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network.
-/// Light client `ETH` RPC.
#[derive(Clone)]
pub struct LightDispatcher {
/// Sync service.

View File

@ -166,7 +166,6 @@ impl EthClient {
fn proved_execution(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<ExecutionResult, Error> {
const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]);

let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone());
let req: CRequest = req.into();
let id = num.0.into();
@ -245,7 +244,22 @@ impl Eth for EthClient {
}

fn syncing(&self) -> Result<SyncStatus, Error> {
-rpc_unimplemented!()
if self.sync.is_major_importing() {
let chain_info = self.client.chain_info();
let current_block = U256::from(chain_info.best_block_number);
let highest_block = self.sync.highest_block().map(U256::from)
.unwrap_or_else(|| current_block.clone());

Ok(SyncStatus::Info(SyncInfo {
starting_block: U256::from(self.sync.start_block()).into(),
current_block: current_block.into(),
highest_block: highest_block.into(),
warp_chunks_amount: None,
warp_chunks_processed: None,
}))
} else {
Ok(SyncStatus::None)
}
}

fn author(&self, _meta: Self::Metadata) -> BoxFuture<RpcH160, Error> {
fn author(&self, _meta: Self::Metadata) -> BoxFuture<RpcH160, Error> { fn author(&self, _meta: Self::Metadata) -> BoxFuture<RpcH160, Error> {

View File

@ -23,7 +23,10 @@ pub mod eth;
pub mod parity;
pub mod parity_set;
pub mod trace;
pub mod net;

pub use self::eth::EthClient;
pub use self::parity::ParityClient;
pub use self::parity_set::ParitySetClient;
pub use self::net::NetClient;
pub use self::trace::TracesClient;

View File

@ -0,0 +1,49 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Net rpc implementation.
use std::sync::Arc;
use jsonrpc_core::Error;
use ethsync::LightSyncProvider;
use v1::traits::Net;
/// Net rpc implementation.
pub struct NetClient<S: ?Sized> {
sync: Arc<S>
}
impl<S: ?Sized> NetClient<S> where S: LightSyncProvider {
/// Creates new NetClient.
pub fn new(sync: Arc<S>) -> Self {
NetClient {
sync: sync,
}
}
}
impl<S: ?Sized + Sync + Send + 'static> Net for NetClient<S> where S: LightSyncProvider {
fn version(&self) -> Result<String, Error> {
Ok(format!("{}", self.sync.network_id()).to_owned())
}
fn peer_count(&self) -> Result<String, Error> {
Ok(format!("0x{:x}", self.sync.peer_numbers().connected as u64).to_owned())
}
fn is_listening(&self) -> Result<bool, Error> {
Ok(true)
}
}
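A tiny usage sketch of the formats produced above: `net_version` reports the network id as a decimal string, while `net_peerCount` is a 0x-prefixed hex quantity. The stub below stands in for the real `LightSyncProvider`, which carries more methods:

struct StubSync { network_id: u64, connected: usize }

impl StubSync {
    fn network_id(&self) -> u64 { self.network_id }
    fn connected_peers(&self) -> usize { self.connected }
}

fn main() {
    let sync = StubSync { network_id: 1, connected: 25 };
    // Same formatting as NetClient::version and NetClient::peer_count.
    assert_eq!(format!("{}", sync.network_id()), "1");
    assert_eq!(format!("0x{:x}", sync.connected_peers() as u64), "0x19");
    println!("ok");
}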

View File

@ -21,7 +21,7 @@ use ethsync::SyncProvider;
use v1::traits::Net;

/// Net rpc implementation.
-pub struct NetClient<S: ?Sized> where S: SyncProvider {
pub struct NetClient<S: ?Sized> {
sync: Weak<S>
}

View File

@ -83,7 +83,7 @@ impl SyncProvider for TestSyncProvider {
difficulty: Some(40.into()),
head: 50.into(),
}),
-les_info: None,
pip_info: None,
},
PeerInfo {
id: None,
@ -96,7 +96,7 @@ impl SyncProvider for TestSyncProvider {
difficulty: None,
head: 60.into()
}),
-les_info: None,
pip_info: None,
}
]
}

View File

@ -304,7 +304,7 @@ fn rpc_parity_net_peers() {
let io = deps.default_client();

let request = r#"{"jsonrpc": "2.0", "method": "parity_netPeers", "params":[], "id": 1}"#;
-let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"les":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"les":null}}]},"id":1}"#;
let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"pip":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"pip":null}}]},"id":1}"#;

assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
}

View File

@ -65,7 +65,7 @@ pub use self::receipt::Receipt;
pub use self::rpc_settings::RpcSettings;
pub use self::sync::{
SyncStatus, SyncInfo, Peers, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo,
-TransactionStats, ChainStatus, EthProtocolInfo, LesProtocolInfo,
TransactionStats, ChainStatus, EthProtocolInfo, PipProtocolInfo,
};
pub use self::trace::{LocalizedTrace, TraceResults};
pub use self::trace_filter::TraceFilter;

View File

@ -83,8 +83,8 @@ pub struct PeerNetworkInfo {
pub struct PeerProtocolsInfo {
/// Ethereum protocol information
pub eth: Option<EthProtocolInfo>,
-/// LES protocol information.
-pub les: Option<LesProtocolInfo>,
/// PIP protocol information.
pub pip: Option<PipProtocolInfo>,
}

/// Peer Ethereum protocol information
@ -108,10 +108,10 @@ impl From<ethsync::EthProtocolInfo> for EthProtocolInfo {
}
}

-/// Peer LES protocol information
/// Peer PIP protocol information
#[derive(Default, Debug, Serialize)]
-pub struct LesProtocolInfo {
-/// Negotiated LES protocol version
pub struct PipProtocolInfo {
/// Negotiated PIP protocol version
pub version: u32,
/// Peer total difficulty
pub difficulty: U256,
@ -119,9 +119,9 @@ pub struct LesProtocolInfo {
pub head: String,
}

-impl From<ethsync::LesProtocolInfo> for LesProtocolInfo {
-fn from(info: ethsync::LesProtocolInfo) -> Self {
-LesProtocolInfo {
impl From<ethsync::PipProtocolInfo> for PipProtocolInfo {
fn from(info: ethsync::PipProtocolInfo) -> Self {
PipProtocolInfo {
version: info.version,
difficulty: info.difficulty.into(),
head: info.head.hex(),
@ -171,7 +171,7 @@ impl From<SyncPeerInfo> for PeerInfo {
},
protocols: PeerProtocolsInfo {
eth: p.eth_info.map(Into::into),
-les: p.les_info.map(Into::into),
pip: p.pip_info.map(Into::into),
},
}
}

View File

@ -43,7 +43,7 @@ pub const WARP_SYNC_PROTOCOL_ID: ProtocolId = *b"par";
/// Ethereum sync protocol
pub const ETH_PROTOCOL: ProtocolId = *b"eth";
/// Ethereum light protocol
-pub const LIGHT_PROTOCOL: ProtocolId = *b"plp";
pub const LIGHT_PROTOCOL: ProtocolId = *b"pip";

/// Sync configuration
#[derive(Debug, Clone, Copy)]
@ -126,7 +126,7 @@ pub struct PeerInfo {
/// Eth protocol info.
pub eth_info: Option<EthProtocolInfo>,
/// Light protocol info.
-pub les_info: Option<LesProtocolInfo>,
pub pip_info: Option<PipProtocolInfo>,
}

/// Ethereum protocol info.
@ -141,10 +141,10 @@ pub struct EthProtocolInfo {
pub difficulty: Option<U256>,
}

-/// LES protocol info.
/// PIP protocol info.
#[derive(Debug)]
#[cfg_attr(feature = "ipc", derive(Binary))]
-pub struct LesProtocolInfo {
pub struct PipProtocolInfo {
/// Protocol version
pub version: u32,
/// SHA3 of peer best block hash
@ -153,9 +153,9 @@ pub struct LesProtocolInfo {
pub difficulty: U256,
}

-impl From<light_net::Status> for LesProtocolInfo {
impl From<light_net::Status> for PipProtocolInfo {
fn from(status: light_net::Status) -> Self {
-LesProtocolInfo {
PipProtocolInfo {
version: status.protocol_version,
head: status.head_hash,
difficulty: status.head_td,
@ -184,7 +184,7 @@ pub struct EthSync {
network: NetworkService,
/// Main (eth/par) protocol handler
eth_handler: Arc<SyncProtocolHandler>,
-/// Light (les) protocol handler
/// Light (pip) protocol handler
light_proto: Option<Arc<LightProtocol>>,
/// The main subprotocol name
subprotocol_name: [u8; 3],
@ -264,7 +264,7 @@ impl SyncProvider for EthSync {
remote_address: session_info.remote_address,
local_address: session_info.local_address,
eth_info: eth_sync.peer_info(&peer_id),
-les_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into),
pip_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into),
})
}).collect()
}).unwrap_or_else(Vec::new)
@ -408,13 +408,13 @@ impl ChainNotify for EthSync {
}
}

-/// LES event handler.
/// PIP event handler.
/// Simply queues transactions from light client peers.
struct TxRelay(Arc<BlockChainClient>);

impl LightHandler for TxRelay {
fn on_transactions(&self, ctx: &EventContext, relay: &[::ethcore::transaction::UnverifiedTransaction]) {
-trace!(target: "les", "Relaying {} transactions from peer {}", relay.len(), ctx.peer());
trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer());
self.0.queue_transactions(relay.iter().map(|tx| ::rlp::encode(tx).to_vec()).collect(), ctx.peer())
}
}
@ -642,6 +642,9 @@ pub trait LightSyncProvider {
/// Get peers information
fn peers(&self) -> Vec<PeerInfo>;

/// Get network id.
fn network_id(&self) -> u64;

/// Get the enode if available.
fn enode(&self) -> Option<String>;
@ -659,13 +662,17 @@ pub struct LightSyncParams<L> {
pub network_id: u64,
/// Subprotocol name.
pub subprotocol_name: [u8; 3],
/// Other handlers to attach.
pub handlers: Vec<Arc<LightHandler>>,
}

/// Service for light synchronization.
pub struct LightSync {
proto: Arc<LightProtocol>,
sync: Arc<::light_sync::SyncInfo + Sync + Send>,
network: NetworkService,
subprotocol_name: [u8; 3],
network_id: u64,
}

impl LightSync {
@ -676,7 +683,7 @@ impl LightSync {
use light_sync::LightSync as SyncHandler; use light_sync::LightSync as SyncHandler;
// initialize light protocol handler and attach sync module. // initialize light protocol handler and attach sync module.
let light_proto = { let (sync, light_proto) = {
let light_params = LightParams { let light_params = LightParams {
network_id: params.network_id, network_id: params.network_id,
flow_params: Default::default(), // or `None`? flow_params: Default::default(), // or `None`?
@ -689,18 +696,24 @@ impl LightSync {
}; };
let mut light_proto = LightProtocol::new(params.client.clone(), light_params); let mut light_proto = LightProtocol::new(params.client.clone(), light_params);
let sync_handler = try!(SyncHandler::new(params.client.clone())); let sync_handler = Arc::new(try!(SyncHandler::new(params.client.clone())));
light_proto.add_handler(Arc::new(sync_handler)); light_proto.add_handler(sync_handler.clone());
Arc::new(light_proto) for handler in params.handlers {
light_proto.add_handler(handler);
}
(sync_handler, Arc::new(light_proto))
}; };
let service = try!(NetworkService::new(params.network_config)); let service = try!(NetworkService::new(params.network_config));
Ok(LightSync { Ok(LightSync {
proto: light_proto, proto: light_proto,
sync: sync,
network: service, network: service,
subprotocol_name: params.subprotocol_name, subprotocol_name: params.subprotocol_name,
network_id: params.network_id,
}) })
} }
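A hedged wiring sketch for the new `handlers` field: extra PIP handlers can now be attached at construction time. Here `net_config`, `light_client`, and `full_client` are assumed to exist in the caller's scope, and the network id is illustrative:

```rust
let params = LightSyncParams {
    network_config: net_config,          // a prepared network configuration
    client: light_client.clone(),        // Arc to the light client
    network_id: 1,                       // illustrative only
    subprotocol_name: *b"pip",
    handlers: vec![Arc::new(TxRelay(full_client.clone()))], // extra PIP handlers
};
let sync = try!(LightSync::new(params));
```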
@ -715,6 +728,12 @@ impl LightSync {
} }
} }
impl ::std::ops::Deref for LightSync {
type Target = ::light_sync::SyncInfo;
fn deref(&self) -> &Self::Target { &*self.sync }
}
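With the `Deref` impl, the read-only `SyncInfo` methods are callable directly on the service handle; a small sketch, reusing the hypothetical `sync` value built above:

```rust
// no need to name the concrete sync type
if sync.is_major_importing() {
    trace!(target: "sync", "major sync underway; started at block {}", sync.start_block());
}
```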
impl ManageNetwork for LightSync { impl ManageNetwork for LightSync {
fn accept_unreserved_peers(&self) { fn accept_unreserved_peers(&self) {
self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); self.network.set_non_reserved_mode(NonReservedPeerMode::Accept);
@ -786,7 +805,7 @@ impl LightSyncProvider for LightSync {
remote_address: session_info.remote_address, remote_address: session_info.remote_address,
local_address: session_info.local_address, local_address: session_info.local_address,
eth_info: None, eth_info: None,
les_info: self.proto.peer_status(&peer_id).map(Into::into), pip_info: self.proto.peer_status(&peer_id).map(Into::into),
}) })
}).collect() }).collect()
}).unwrap_or_else(Vec::new) }).unwrap_or_else(Vec::new)
@ -796,6 +815,10 @@ impl LightSyncProvider for LightSync {
self.network.external_url() self.network.external_url()
} }
fn network_id(&self) -> u64 {
self.network_id
}
fn transactions_stats(&self) -> BTreeMap<H256, TransactionStats> { fn transactions_stats(&self) -> BTreeMap<H256, TransactionStats> {
Default::default() // TODO Default::default() // TODO
} }

View File

@ -32,7 +32,7 @@
//! announced blocks. //! announced blocks.
//! - On bad block/response, punish peer and reset. //! - On bad block/response, punish peer and reset.
use std::collections::HashMap; use std::collections::{HashMap, HashSet};
use std::mem; use std::mem;
use std::sync::Arc; use std::sync::Arc;
@ -150,6 +150,19 @@ impl AncestorSearch {
} }
} }
fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch {
match self {
AncestorSearch::Awaiting(id, start, req) => {
if req_ids.iter().any(|x| x == &id) {
AncestorSearch::Queued(start)
} else {
AncestorSearch::Awaiting(id, start, req)
}
}
other => other,
}
}
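The fallback is easiest to see in isolation: an in-flight ancestor search whose request id has been abandoned (e.g. on peer disconnect) reverts to `Queued(start)` and will be re-dispatched from the same start block. A self-contained toy model of that transition (toy types, not the real `AncestorSearch`):

```rust
#[derive(Debug, PartialEq)]
enum Search {
    Queued(u64),          // ready to dispatch from this start block
    Awaiting(usize, u64), // (request id, start block) in flight
}

impl Search {
    fn requests_abandoned(self, req_ids: &[usize]) -> Search {
        match self {
            Search::Awaiting(id, start) if req_ids.contains(&id) => Search::Queued(start),
            other => other,
        }
    }
}

fn main() {
    // a matching id re-queues the search at block 100
    assert_eq!(Search::Awaiting(7, 100).requests_abandoned(&[7]), Search::Queued(100));
    // unrelated abandonments leave it waiting
    assert_eq!(Search::Awaiting(7, 100).requests_abandoned(&[9]), Search::Awaiting(7, 100));
}
```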
fn dispatch_request<F>(self, mut dispatcher: F) -> AncestorSearch fn dispatch_request<F>(self, mut dispatcher: F) -> AncestorSearch
where F: FnMut(HeadersRequest) -> Option<ReqId> where F: FnMut(HeadersRequest) -> Option<ReqId>
{ {
@ -206,8 +219,10 @@ impl<'a> ResponseContext for ResponseCtx<'a> {
/// Light client synchronization manager. See module docs for more details. /// Light client synchronization manager. See module docs for more details.
pub struct LightSync<L: AsLightClient> { pub struct LightSync<L: AsLightClient> {
start_block_number: u64,
best_seen: Mutex<Option<ChainInfo>>, // best seen block on the network. best_seen: Mutex<Option<ChainInfo>>, // best seen block on the network.
peers: RwLock<HashMap<PeerId, Mutex<Peer>>>, // peers which are relevant to synchronization. peers: RwLock<HashMap<PeerId, Mutex<Peer>>>, // peers which are relevant to synchronization.
pending_reqs: Mutex<HashSet<ReqId>>, // requests from this handler.
client: Arc<L>, client: Arc<L>,
rng: Mutex<OsRng>, rng: Mutex<OsRng>,
state: Mutex<SyncState>, state: Mutex<SyncState>,
@ -270,7 +285,8 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
*state = match mem::replace(&mut *state, SyncState::Idle) { *state = match mem::replace(&mut *state, SyncState::Idle) {
SyncState::Idle => SyncState::Idle, SyncState::Idle => SyncState::Idle,
SyncState::AncestorSearch(search) => SyncState::AncestorSearch(search), SyncState::AncestorSearch(search) =>
SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)),
SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)), SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)),
}; };
} }
@ -320,6 +336,10 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
return return
} }
if !self.pending_reqs.lock().remove(&req_id) {
return
}
let headers = match responses.get(0) { let headers = match responses.get(0) {
Some(&request::Response::Headers(ref response)) => &response.headers[..], Some(&request::Response::Headers(ref response)) => &response.headers[..],
Some(_) => { Some(_) => {
@ -418,8 +438,10 @@ impl<L: AsLightClient> LightSync<L> {
let best_td = chain_info.pending_total_difficulty; let best_td = chain_info.pending_total_difficulty;
let sync_target = match *self.best_seen.lock() { let sync_target = match *self.best_seen.lock() {
Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash), Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash),
_ => { ref other => {
trace!(target: "sync", "No target to sync to."); let network_score = other.as_ref().map(|target| target.head_td);
trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}",
network_score, best_td);
*state = SyncState::Idle; *state = SyncState::Idle;
return; return;
} }
@ -493,6 +515,7 @@ impl<L: AsLightClient> LightSync<L> {
for peer in &peer_ids { for peer in &peer_ids {
match ctx.request_from(*peer, request.clone()) { match ctx.request_from(*peer, request.clone()) {
Ok(id) => { Ok(id) => {
self.pending_reqs.lock().insert(id.clone());
return Some(id) return Some(id)
} }
Err(NetError::NoCredits) => {} Err(NetError::NoCredits) => {}
@ -523,11 +546,48 @@ impl<L: AsLightClient> LightSync<L> {
/// so it can act on events. /// so it can act on events.
pub fn new(client: Arc<L>) -> Result<Self, ::std::io::Error> { pub fn new(client: Arc<L>) -> Result<Self, ::std::io::Error> {
Ok(LightSync { Ok(LightSync {
start_block_number: client.as_light_client().chain_info().best_block_number,
best_seen: Mutex::new(None), best_seen: Mutex::new(None),
peers: RwLock::new(HashMap::new()), peers: RwLock::new(HashMap::new()),
pending_reqs: Mutex::new(HashSet::new()),
client: client, client: client,
rng: Mutex::new(try!(OsRng::new())), rng: Mutex::new(try!(OsRng::new())),
state: Mutex::new(SyncState::Idle), state: Mutex::new(SyncState::Idle),
}) })
} }
} }
/// Trait for erasing the type of a light sync object and exposing read-only methods.
pub trait SyncInfo {
/// Get the highest block advertised on the network.
fn highest_block(&self) -> Option<u64>;
/// Get the block number at the time of sync start.
fn start_block(&self) -> u64;
/// Whether major sync is underway.
fn is_major_importing(&self) -> bool;
}
impl<L: AsLightClient> SyncInfo for LightSync<L> {
fn highest_block(&self) -> Option<u64> {
self.best_seen.lock().as_ref().map(|x| x.head_num)
}
fn start_block(&self) -> u64 {
self.start_block_number
}
fn is_major_importing(&self) -> bool {
const EMPTY_QUEUE: usize = 3;
if self.client.as_light_client().queue_info().unverified_queue_size > EMPTY_QUEUE {
return true;
}
match *self.state.lock() {
SyncState::Idle => false,
_ => true,
}
}
}
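Since `SyncInfo` erases the `L: AsLightClient` parameter, callers can hold a trait object and report progress uniformly; a sketch with a hypothetical reporting function:

```rust
fn report_progress(sync: &SyncInfo) {
    match sync.highest_block() {
        Some(high) => trace!(target: "sync", "syncing from {} toward {} (major: {})",
            sync.start_block(), high, sync.is_major_importing()),
        None => trace!(target: "sync", "no network target seen yet"),
    }
}
```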

View File

@ -207,7 +207,7 @@ impl TestNet<Peer> {
pub fn light(n_light: usize, n_full: usize) -> Self { pub fn light(n_light: usize, n_full: usize) -> Self {
let mut peers = Vec::with_capacity(n_light + n_full); let mut peers = Vec::with_capacity(n_light + n_full);
for _ in 0..n_light { for _ in 0..n_light {
let client = LightClient::new(Default::default(), &Spec::new_test(), IoChannel::disconnected()); let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected());
peers.push(Arc::new(Peer::new_light(Arc::new(client)))) peers.push(Arc::new(Peer::new_light(Arc::new(client))))
} }

View File

@ -23,11 +23,11 @@ use traits::Encodable;
struct ListInfo { struct ListInfo {
position: usize, position: usize,
current: usize, current: usize,
max: usize, max: Option<usize>,
} }
impl ListInfo { impl ListInfo {
fn new(position: usize, max: usize) -> ListInfo { fn new(position: usize, max: Option<usize>) -> ListInfo {
ListInfo { ListInfo {
position: position, position: position,
current: 0, current: 0,
@ -133,7 +133,7 @@ impl RlpStream {
self.buffer.push(0); self.buffer.push(0);
let position = self.buffer.len(); let position = self.buffer.len();
self.unfinished_lists.push(ListInfo::new(position, len)); self.unfinished_lists.push(ListInfo::new(position, Some(len)));
}, },
} }
@ -141,6 +141,19 @@ impl RlpStream {
self self
} }
/// Declare appending a list of unknown size, chainable.
pub fn begin_unbounded_list(&mut self) -> &mut RlpStream {
self.finished_list = false;
// the length prefix is longer than 1 byte only for lists over 55 bytes;
// by always pushing this 1 byte up front we usually avoid shifting the data later
self.buffer.push(0);
let position = self.buffer.len();
self.unfinished_lists.push(ListInfo::new(position, None));
// return chainable self
self
}
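A short usage sketch: with an unbounded list, the item count need not be known before streaming starts; the list is sealed afterwards by `complete_unbounded_list` (defined below). `item_count` is a stand-in for a value only known at runtime:

```rust
let mut stream = RlpStream::new();
stream.begin_unbounded_list();
for i in 0..item_count {
    stream.append(&(i as u32));
}
stream.complete_unbounded_list(); // seals the list
let encoded = stream.drain();
```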
/// Appends null to the end of stream, chainable. /// Appends null to the end of stream, chainable.
/// ///
/// ```rust /// ```rust
@ -177,6 +190,36 @@ impl RlpStream {
self self
} }
/// Appends raw (pre-serialised) RLP data. Checks for size overflow.
pub fn append_raw_checked(&mut self, bytes: &[u8], item_count: usize, max_size: usize) -> bool {
if self.estimate_size(bytes.len()) > max_size {
return false;
}
self.append_raw(bytes, item_count);
true
}
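A hedged sketch of the intended use: packing pre-encoded payloads into a size-capped packet, stopping at the first item that would overflow the cap. `encoded_items` (already-RLP-encoded byte vectors) and `MAX_PACKET_SIZE` are assumptions for illustration:

```rust
let mut stream = RlpStream::new();
stream.begin_unbounded_list();
for item in encoded_items {
    // each call re-estimates the total size, pending list prefixes included
    if !stream.append_raw_checked(item, 1, MAX_PACKET_SIZE) {
        break; // this item would push the packet over the cap
    }
}
stream.complete_unbounded_list();
```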
/// Calculate the total RLP size if `add` more payload bytes were appended, including any pending list length prefixes.
pub fn estimate_size(&self, add: usize) -> usize {
let total_size = self.buffer.len() + add;
let mut base_size = total_size;
for list in &self.unfinished_lists[..] {
let len = total_size - list.position;
if len > 55 {
let leading_empty_bytes = (len as u64).leading_zeros() as usize / 8;
let size_bytes = 8 - leading_empty_bytes;
base_size += size_bytes;
}
}
base_size
}
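The per-list correction follows RLP's length rules: a payload of at most 55 bytes fits under the single prefix byte already reserved when the list was opened, while a longer payload needs `size_bytes` extra length-of-length bytes. A worked check of the arithmetic for a 100-byte payload:

```rust
let len: u64 = 100;                                         // pending payload length
let leading_empty_bytes = len.leading_zeros() as usize / 8; // 57 / 8 = 7
let size_bytes = 8 - leading_empty_bytes;                   // one extra length byte
assert_eq!(size_bytes, 1); // estimate_size adds 1 for this list
```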
/// Returns the current RLP size in bytes of the data pushed into the stream.
pub fn len(&self) -> usize {
self.estimate_size(0)
}
/// Clear the output stream so far. /// Clear the output stream so far.
/// ///
/// ```rust /// ```rust
@ -246,10 +289,11 @@ impl RlpStream {
None => false, None => false,
Some(ref mut x) => { Some(ref mut x) => {
x.current += inserted_items; x.current += inserted_items;
if x.current > x.max { match x.max {
panic!("You cannot append more items then you expect!"); Some(ref max) if x.current > *max => panic!("You cannot append more items then you expect!"),
Some(ref max) => x.current == *max,
_ => false,
} }
x.current == x.max
} }
}; };
@ -273,6 +317,17 @@ impl RlpStream {
false => panic!() false => panic!()
} }
} }
/// Finalize the current unbounded list. Panics if no unbounded list has been opened.
pub fn complete_unbounded_list(&mut self) {
let list = self.unfinished_lists.pop().expect("No open list.");
if list.max.is_some() {
panic!("List type mismatch.");
}
let len = self.buffer.len() - list.position;
self.encoder().insert_list_payload(len, list.position);
self.note_appended(1);
}
} }
pub struct BasicEncoder<'a> { pub struct BasicEncoder<'a> {

View File

@ -412,3 +412,25 @@ fn test_rlp_list_length_overflow() {
let as_val: Result<String, DecoderError> = rlp.val_at(0); let as_val: Result<String, DecoderError> = rlp.val_at(0);
assert_eq!(Err(DecoderError::RlpIsTooShort), as_val); assert_eq!(Err(DecoderError::RlpIsTooShort), as_val);
} }
#[test]
fn test_rlp_stream_size_limit() {
for limit in 40 .. 270 {
let item = [0u8; 1];
let mut stream = RlpStream::new();
while stream.append_raw_checked(&item, 1, limit) {}
assert_eq!(stream.drain().len(), limit);
}
}
#[test]
fn test_rlp_stream_unbounded_list() {
let mut stream = RlpStream::new();
stream.begin_unbounded_list();
stream.append(&40u32);
stream.append(&41u32);
assert!(!stream.is_finished());
stream.complete_unbounded_list();
assert!(stream.is_finished());
}