[beta] Backports (#8558)

* Fetching logs by hash in blockchain database (#8463)

* Fetch logs by hash in blockchain database

* Fix tests

* Add unit test for branch block logs fetching

* Add docs that blocks must already be sorted

* Handle branch block cases properly

* typo: empty -> is_empty

* Remove return_empty_if_none by using a closure

* Use BTreeSet to avoid sorting again

* Move is_canon to BlockChain

* typo: pass value by reference

* Use loop and wrap inside blocks to simplify the code

Borrowed from https://github.com/paritytech/parity/pull/8463#discussion_r183453326

* typo: missed a comment
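
The canonicality check behind "Move is_canon to BlockChain" is compact enough to restate here; this is the default trait method as it appears in the diff below (a hash is canonical exactly when the hash -> number -> hash round-trip returns the same hash):

    /// Returns true if the given block is known and in the canon chain.
    fn is_canon(&self, hash: &H256) -> bool {
        // `block_number` then `block_hash` only round-trips for canonical blocks;
        // any `None` along the way falls through to `false`.
        let is_canon = || Some(hash == &self.block_hash(self.block_number(hash)?)?);
        is_canon().unwrap_or(false)
    }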

* Pass on storage keys tracing to handle the case when it is not modified (#8491)

* Pass on storage keys even if it is not modified

* typo: account and storage query

`to_pod_diff` builds both the `touched_addresses` merge and the storage-key merge.

* Fix tests

* Use state query directly because of suicided accounts

* Fix a RefCell borrow issue

* Add tests for unmodified storage trace

* Address grumbles

* typo: remove unwanted empty line

* ensure_cached compiles with the original signature
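
The storage-key merging described above can be illustrated in isolation. A minimal sketch, assuming plain u64 keys in place of the real H256 storage keys (the actual code unions `BTreeSet`s of `storage_changes()` keys from both state caches, as the diff further down shows):

    use std::collections::BTreeSet;

    /// Union the modified storage keys of a state with those of a query state,
    /// so keys that were only touched on one side are still passed on to the diff.
    fn merge_storage_keys(self_keys: &[u64], query_keys: Option<&[u64]>) -> Vec<u64> {
        let self_keys: BTreeSet<u64> = self_keys.iter().cloned().collect();
        match query_keys {
            Some(qk) => {
                // `BTreeSet::union` keeps the result ordered and duplicate-free.
                let query: BTreeSet<u64> = qk.iter().cloned().collect();
                self_keys.union(&query).cloned().collect()
            }
            None => self_keys.into_iter().collect(),
        }
    }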

* Update wasmi and pwasm-utils (#8493)

* Update wasmi to 0.2

New wasmi supports 32-bit platforms and no longer requires a special feature to build for them.

* Update pwasm-utils to 0.1.5
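
For reference, the wasmi dependency change itself (mirrored in the Cargo.toml hunk near the end of this diff) reduces to:

    # before: 32-bit targets needed an explicit opt-in feature
    wasmi = { version = "0.1.3", features = ["opt-in-32bit"] }

    # after: 32-bit support is built in
    wasmi = { version = "0.2" }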

* Show imported messages for light client (#8517)

* Enable WebAssembly and Byzantium for Ellaism (#8520)

* Enable WebAssembly and Byzantium for Ellaism

* Fix indentation

* Remove empty lines

* Don't panic in import_block if invalid rlp (#8522)

* Don't panic in import_block if invalid rlp

* Remove redundant type annotation

* Replace RLP header view usage with safe decoding

Using the view will panic on invalid RLP. Here we use `Rlp` decoding directly, which returns a `Result<_, DecoderError>`. While this path currently should not receive any invalid RLP, this makes it safer if it is ever called with invalid RLP from other code paths.
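
A minimal sketch of the contrast, assuming a `Header` type that implements `rlp::Decodable` (as ethcore's does):

    // Before: view-based access, which panics on malformed input.
    // let header = view!(BlockView, &bytes).header();

    // After: decode item 0 of the block RLP list ([header, transactions, uncles])
    // and let the caller handle a malformed header.
    fn decode_header(bytes: &[u8]) -> Result<Header, ::rlp::DecoderError> {
        ::rlp::Rlp::new(bytes).val_at(0)
    }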

* Node table sorting according to last contact data (#8541)

* network-devp2p: sort nodes in node table using last contact data

* network-devp2p: rename node contact types in node table json output

* network-devp2p: fix node table tests

* network-devp2p: note node failure when failed to establish connection

* network-devp2p: handle UselessPeer error

* network-devp2p: note failure when marking node as useless
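
The resulting ordering policy is easiest to see stripped of the networking details. A simplified sketch (plain ids stand in for full `Node` entries; the real table also filters out nodes marked useless and randomly shuffles the unknown bucket):

    use std::time::SystemTime;

    #[derive(Clone, Copy)]
    enum Contact { Success(SystemTime), Failure(SystemTime) }

    /// Order node ids: recent successes first (newest first), then nodes with
    /// unknown contact (new nodes, or contacts older than a week), then failures
    /// (oldest first).
    fn order(nodes: &[(u32, Option<Contact>)]) -> Vec<u32> {
        let (mut ok, mut unknown, mut bad) = (Vec::new(), Vec::new(), Vec::new());
        for &(id, contact) in nodes {
            match contact {
                Some(Contact::Success(t)) => ok.push((t, id)),
                Some(Contact::Failure(t)) => bad.push((t, id)),
                None => unknown.push(id),
            }
        }
        ok.sort_by(|a, b| b.0.cmp(&a.0));  // most recent success first
        bad.sort_by(|a, b| a.0.cmp(&b.0)); // oldest failure first
        ok.into_iter().map(|(_, id)| id)
            .chain(unknown)
            .chain(bad.into_iter().map(|(_, id)| id))
            .collect()
    }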
Commit 62ccdd7ad4 (parent aae451de9e)
Authored by Wei Tang, 2018-05-08 02:17:52 +08:00; committed by Afri Schoedon
16 changed files with 514 additions and 159 deletions

--- Cargo.lock (generated)

@@ -1770,6 +1770,11 @@ dependencies = [
  "tiny-keccak 1.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "nan-preserving-float"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "net2"
 version = "0.2.31"
@@ -2586,7 +2591,7 @@ dependencies = [
 [[package]]
 name = "pwasm-utils"
-version = "0.1.3"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3625,18 +3630,19 @@ dependencies = [
  "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-wasm 0.27.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "pwasm-utils 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pwasm-utils 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "vm 0.1.0",
- "wasmi 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasmi 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "wasmi"
-version = "0.1.3"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "memory_units 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "nan-preserving-float 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-wasm 0.27.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -3888,6 +3894,7 @@ dependencies = [
 "checksum msdos_time 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aad9dfe950c057b1bfe9c1f2aa51583a8468ef2a5baba2ebbe06d775efeb7729"
 "checksum multibase 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b9c35dac080fd6e16a99924c8dfdef0af89d797dd851adab25feaffacf7850d6"
 "checksum multihash 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d49add5f49eb08bfc4d01ff286b84a48f53d45314f165c2d6efe477222d24f3"
+"checksum nan-preserving-float 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34d4f00fcc2f4c9efa8cc971db0da9e28290e28e97af47585e48691ef10ff31f"
 "checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09"
 "checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
 "checksum ntp 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "143149743832c6543b60a8ef2a26cd9122dfecec2b767158e852a7beecf6d7a0"
@@ -3928,7 +3935,7 @@ dependencies = [
 "checksum proc-macro2 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "388d7ea47318c5ccdeb9ba6312cee7d3f65dd2804be8580a170fce410d50b786"
 "checksum protobuf 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40e2484e639dcae0985fc483ad76ce7ad78ee5aa092751d7d538f0b20d76486b"
 "checksum pulldown-cmark 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8361e81576d2e02643b04950e487ec172b687180da65c731c03cf336784e6c07"
-"checksum pwasm-utils 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "54d440c3b56eee028aa5d4f18cbed8c6e0c9ae23563b93f344beb7e73854ea02"
+"checksum pwasm-utils 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d51e9954a77aab7b4b606dc315a49cbed187924f163b6750cdf6d5677dbf0839"
 "checksum quasi 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18c45c4854d6d1cf5d531db97c75880feb91c958b0720f4ec1057135fec358b3"
 "checksum quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51b9e25fa23c044c1803f43ca59c98dac608976dd04ce799411edd58ece776d4"
 "checksum quasi_macros 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29cec87bc2816766d7e4168302d505dd06b0a825aed41b00633d296e922e02dd"
@@ -4033,7 +4040,7 @@ dependencies = [
 "checksum vergen 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c3365f36c57e5df714a34be40902b27a992eeddb9996eca52d0584611cf885d"
 "checksum version_check 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6b772017e347561807c1aa192438c5fd74242a670a6cffacc40f2defd1dc069d"
 "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
-"checksum wasmi 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d19da510b59247935ad5f598357b3cc739912666d75d3d28318026478d95bbdb"
+"checksum wasmi 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46df76793c28cd8f590d5667f540a81c1c245440a17b03560e381226e27cf348"
 "checksum webpki 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9e1622384bcb5458c6a3e3fa572f53ea8fef1cc85e535a2983dea87e9154fac2"
 "checksum webpki-roots 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "155d4060e5befdf3a6076bd28c22513473d9900b763c9e4521acc6f78a75415c"
 "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"

---

@@ -30,7 +30,7 @@ use kvdb::KeyValueDB;

 use cache::Cache;
 use parking_lot::Mutex;
-use super::{ChainDataFetcher, Client, Config as ClientConfig};
+use super::{ChainDataFetcher, LightChainNotify, Client, Config as ClientConfig};

 /// Errors on service initialization.
 #[derive(Debug)]
@@ -86,6 +86,11 @@ impl<T: ChainDataFetcher> Service<T> {
        })
    }

+   /// Set the actor to be notified on certain chain events
+   pub fn add_notify(&self, notify: Arc<LightChainNotify>) {
+       self.client.add_listener(Arc::downgrade(&notify));
+   }
+
    /// Register an I/O handler on the service.
    pub fn register_handler(&self, handler: Arc<IoHandler<ClientIoMessage> + Send>) -> Result<(), IoError> {
        self.io_service.register_handler(handler)

---

@@ -13,9 +13,9 @@
     "eip150Transition": "0x0",
     "eip160Transition": "0x0",
     "ecip1017EraRounds": 10000000,
     "eip161abcTransition": "0x7fffffffffffffff",
-    "eip161dTransition": "0x7fffffffffffffff"
+    "eip161dTransition": "0x7fffffffffffffff",
+    "eip100bTransition": 2000000
    }
   }
  },
@@ -29,7 +29,12 @@
  "chainID": "0x40",
  "eip155Transition": "0x0",
  "eip98Transition": "0x7fffffffffffff",
- "eip86Transition": "0x7fffffffffffff"
+ "eip86Transition": "0x7fffffffffffff",
+ "wasmActivationTransition": 2000000,
+ "eip140Transition": 2000000,
+ "eip211Transition": 2000000,
+ "eip214Transition": 2000000,
+ "eip658Transition": 2000000
 },
 "genesis": {
  "seal": {
@@ -60,6 +65,10 @@
  "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
  "0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
  "0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
- "0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }
+ "0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
+ "0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": 2000000, "pricing": { "modexp": { "divisor": 20 } } } },
+ "0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": 2000000, "pricing": { "linear": { "base": 500, "word": 0 } } } },
+ "0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": 2000000, "pricing": { "linear": { "base": 40000, "word": 0 } } } },
+ "0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": 2000000, "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } }
 }
 }

---

@@ -57,6 +57,12 @@ pub trait BlockProvider {
    /// (though not necessarily a part of the canon chain).
    fn is_known(&self, hash: &H256) -> bool;

+   /// Returns true if the given block is known and in the canon chain.
+   fn is_canon(&self, hash: &H256) -> bool {
+       let is_canon = || Some(hash == &self.block_hash(self.block_number(hash)?)?);
+       is_canon().unwrap_or(false)
+   }
+
    /// Get the first block of the best part of the chain.
    /// Return `None` if there is no gap and the first block is the genesis.
    /// Any queries of blocks which precede this one are not guaranteed to
@@ -148,7 +154,7 @@ pub trait BlockProvider {
    fn blocks_with_bloom(&self, bloom: &Bloom, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;

    /// Returns logs matching given filter.
-   fn logs<F>(&self, blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
+   fn logs<F>(&self, blocks: Vec<H256>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
        where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized;
 }
@@ -332,16 +338,18 @@ impl BlockProvider for BlockChain {
        .collect()
    }

-   fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
+   /// Returns logs matching given filter. The order of logs returned will be the same as the order of the blocks
+   /// provided. It is the caller's responsibility to sort the blocks provided in advance.
+   fn logs<F>(&self, mut blocks: Vec<H256>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
        where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized {
        // sort in reverse order
-       blocks.sort_by(|a, b| b.cmp(a));
+       blocks.reverse();

        let mut logs = blocks
            .chunks(128)
            .flat_map(move |blocks_chunk| {
                blocks_chunk.into_par_iter()
-                   .filter_map(|number| self.block_hash(*number).map(|hash| (*number, hash)))
+                   .filter_map(|hash| self.block_number(&hash).map(|r| (r, hash)))
                    .filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
                    .filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes())))
                    .flat_map(|(number, hash, mut receipts, mut hashes)| {
@@ -368,7 +376,7 @@ impl BlockProvider for BlockChain {
                    .enumerate()
                    .map(move |(i, log)| LocalizedLogEntry {
                        entry: log,
-                       block_hash: hash,
+                       block_hash: *hash,
                        block_number: number,
                        transaction_hash: tx_hash,
                        // iterating in reverse order
@@ -1933,17 +1941,33 @@ mod tests {
            value: 103.into(),
            data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
        }.sign(&secret(), None);
+       let t4 = Transaction {
+           nonce: 0.into(),
+           gas_price: 0.into(),
+           gas: 100_000.into(),
+           action: Action::Create,
+           value: 104.into(),
+           data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
+       }.sign(&secret(), None);
        let tx_hash1 = t1.hash();
        let tx_hash2 = t2.hash();
        let tx_hash3 = t3.hash();
+       let tx_hash4 = t4.hash();

        let genesis = BlockBuilder::genesis();
        let b1 = genesis.add_block_with_transactions(vec![t1, t2]);
        let b2 = b1.add_block_with_transactions(iter::once(t3));
+       let b3 = genesis.add_block_with(|| BlockOptions {
+           transactions: vec![t4.clone()],
+           difficulty: U256::from(9),
+           ..Default::default()
+       }); // Branch block
        let b1_hash = b1.last().hash();
        let b1_number = b1.last().number();
        let b2_hash = b2.last().hash();
        let b2_number = b2.last().number();
+       let b3_hash = b3.last().hash();
+       let b3_number = b3.last().number();

        let db = new_db();
        let bc = new_chain(&genesis.last().encoded(), db.clone());
@@ -1974,10 +1998,21 @@ mod tests {
            ],
        }
    ]);
+   insert_block(&db, &bc, &b3.last().encoded(), vec![
+       Receipt {
+           outcome: TransactionOutcome::StateRoot(H256::default()),
+           gas_used: 10_000.into(),
+           log_bloom: Default::default(),
+           logs: vec![
+               LogEntry { address: Default::default(), topics: vec![], data: vec![5], },
+           ],
+       }
+   ]);

    // when
-   let logs1 = bc.logs(vec![1, 2], |_| true, None);
-   let logs2 = bc.logs(vec![1, 2], |_| true, Some(1));
+   let logs1 = bc.logs(vec![b1_hash, b2_hash], |_| true, None);
+   let logs2 = bc.logs(vec![b1_hash, b2_hash], |_| true, Some(1));
+   let logs3 = bc.logs(vec![b3_hash], |_| true, None);

    // then
    assert_eq!(logs1, vec![
@@ -2029,6 +2064,17 @@ mod tests {
            log_index: 0,
        }
    ]);
+   assert_eq!(logs3, vec![
+       LocalizedLogEntry {
+           entry: LogEntry { address: Default::default(), topics: vec![], data: vec![5] },
+           block_hash: b3_hash,
+           block_number: b3_number,
+           transaction_hash: tx_hash4,
+           transaction_index: 0,
+           transaction_log_index: 0,
+           log_index: 0,
+       }
+   ]);
 }

 #[test]

---

@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::collections::{HashSet, HashMap, BTreeMap, VecDeque};
+use std::collections::{HashSet, HashMap, BTreeMap, BTreeSet, VecDeque};
 use std::str::FromStr;
 use std::sync::{Arc, Weak};
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
@@ -452,9 +452,7 @@ impl Importer {
    ///
    /// The block is guaranteed to be the next best blocks in the
    /// first block sequence. Does no sealing or transaction validation.
-   fn import_old_block(&self, block_bytes: Bytes, receipts_bytes: Bytes, db: &KeyValueDB, chain: &BlockChain) -> Result<H256, ::error::Error> {
-       let block = view!(BlockView, &block_bytes);
-       let header = block.header();
+   fn import_old_block(&self, header: &Header, block_bytes: Bytes, receipts_bytes: Bytes, db: &KeyValueDB, chain: &BlockChain) -> Result<H256, ::error::Error> {
        let receipts = ::rlp::decode_list(&receipts_bytes);
        let hash = header.hash();
        let _import_lock = self.import_lock.lock();
@@ -1413,7 +1411,7 @@ impl ImportBlock for Client {
        use verification::queue::kind::blocks::Unverified;

        // create unverified block here so the `keccak` calculation can be cached.
-       let unverified = Unverified::new(bytes);
+       let unverified = Unverified::from_rlp(bytes)?;

        {
            if self.chain.read().is_known(&unverified.hash()) {
@@ -1428,19 +1426,19 @@ impl ImportBlock for Client {
    }

    fn import_block_with_receipts(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, BlockImportError> {
+       let header: Header = ::rlp::Rlp::new(&block_bytes).val_at(0)?;
        {
            // check block order
-           let header = view!(BlockView, &block_bytes).header_view();
            if self.chain.read().is_known(&header.hash()) {
                bail!(BlockImportErrorKind::Import(ImportErrorKind::AlreadyInChain));
            }
-           let status = self.block_status(BlockId::Hash(header.parent_hash()));
+           let status = self.block_status(BlockId::Hash(*header.parent_hash()));
            if status == BlockStatus::Unknown || status == BlockStatus::Pending {
-               bail!(BlockImportErrorKind::Block(BlockError::UnknownParent(header.parent_hash())));
+               bail!(BlockImportErrorKind::Block(BlockError::UnknownParent(*header.parent_hash())));
            }
        }
-       self.importer.import_old_block(block_bytes, receipts_bytes, &**self.db.read(), &*self.chain.read()).map_err(Into::into)
+       self.importer.import_old_block(&header, block_bytes, receipts_bytes, &**self.db.read(), &*self.chain.read()).map_err(Into::into)
    }
 }
@@ -1853,23 +1851,82 @@ impl BlockChainClient for Client {
    }

    fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
-       let (from, to) = match (self.block_number_ref(&filter.from_block), self.block_number_ref(&filter.to_block)) {
-           (Some(from), Some(to)) => (from, to),
-           _ => return Vec::new(),
-       };
-
-       let chain = self.chain.read();
-       let blocks = filter.bloom_possibilities().iter()
-           .map(move |bloom| {
-               chain.blocks_with_bloom(bloom, from, to)
-           })
-           .flat_map(|m| m)
-           // remove duplicate elements
-           .collect::<HashSet<u64>>()
-           .into_iter()
-           .collect::<Vec<u64>>();
-
-       self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit)
+       // Wrap the logic inside a closure so that we can take advantage of question mark syntax.
+       let fetch_logs = || {
+           let chain = self.chain.read();
+
+           // First, check whether `filter.from_block` and `filter.to_block` are on the canon chain. If so, we can
+           // use the optimized version.
+           let is_canon = |id| {
+               match id {
+                   // If it is referred by number, then it is always on the canon chain.
+                   &BlockId::Earliest | &BlockId::Latest | &BlockId::Number(_) => true,
+                   // If it is referred by hash, we see whether a hash -> number -> hash conversion gives us the same
+                   // result.
+                   &BlockId::Hash(ref hash) => chain.is_canon(hash),
+               }
+           };
+
+           let blocks = if is_canon(&filter.from_block) && is_canon(&filter.to_block) {
+               // If we are on the canon chain, use bloom filter to fetch required hashes.
+               let from = self.block_number_ref(&filter.from_block)?;
+               let to = self.block_number_ref(&filter.to_block)?;
+
+               filter.bloom_possibilities().iter()
+                   .map(|bloom| {
+                       chain.blocks_with_bloom(bloom, from, to)
+                   })
+                   .flat_map(|m| m)
+                   // remove duplicate elements
+                   .collect::<BTreeSet<u64>>()
+                   .into_iter()
+                   .filter_map(|n| chain.block_hash(n))
+                   .collect::<Vec<H256>>()
+           } else {
+               // Otherwise, we use a slower version that finds a link between from_block and to_block.
+               let from_hash = Self::block_hash(&chain, filter.from_block)?;
+               let from_number = chain.block_number(&from_hash)?;
+               let to_hash = Self::block_hash(&chain, filter.to_block)?;
+
+               let blooms = filter.bloom_possibilities();
+               let bloom_match = |header: &encoded::Header| {
+                   blooms.iter().any(|bloom| header.log_bloom().contains_bloom(bloom))
+               };
+
+               let (blocks, last_hash) = {
+                   let mut blocks = Vec::new();
+                   let mut current_hash = to_hash;
+
+                   loop {
+                       let header = chain.block_header_data(&current_hash)?;
+                       if bloom_match(&header) {
+                           blocks.push(current_hash);
+                       }
+
+                       // Stop if `from` block is reached.
+                       if header.number() <= from_number {
+                           break;
+                       }
+                       current_hash = header.parent_hash();
+                   }
+
+                   blocks.reverse();
+                   (blocks, current_hash)
+               };
+
+               // Check if we've actually reached the expected `from` block.
+               if last_hash != from_hash || blocks.is_empty() {
+                   return None;
+               }
+
+               blocks
+           };
+
+           Some(self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit))
+       };
+
+       fetch_logs().unwrap_or_default()
    }

    fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {

---

@@ -190,6 +190,7 @@ error_chain! {
    foreign_links {
        Block(BlockError) #[doc = "Block error"];
+       Decoder(::rlp::DecoderError) #[doc = "Rlp decoding error"];
    }

    errors {
@@ -206,6 +207,7 @@ impl From<Error> for BlockImportError {
        match e {
            Error(ErrorKind::Block(block_error), _) => BlockImportErrorKind::Block(block_error).into(),
            Error(ErrorKind::Import(import_error), _) => BlockImportErrorKind::Import(import_error.into()).into(),
+           Error(ErrorKind::Util(util_error::ErrorKind::Decoder(decoder_err)), _) => BlockImportErrorKind::Decoder(decoder_err).into(),
            _ => BlockImportErrorKind::Other(format!("other block import error: {:?}", e)).into(),
        }
    }

---

@@ -21,7 +21,7 @@

 use std::cell::{RefCell, RefMut};
 use std::collections::hash_map::Entry;
-use std::collections::{HashMap, BTreeMap, HashSet};
+use std::collections::{HashMap, BTreeMap, BTreeSet, HashSet};
 use std::fmt;
 use std::sync::Arc;
 use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY};
@@ -862,40 +862,65 @@ impl<B: Backend> State<B> {
        }))
    }

-   // Return a list of all touched addresses in cache.
-   fn touched_addresses(&self) -> Vec<Address> {
-       assert!(self.checkpoints.borrow().is_empty());
-       self.cache.borrow().iter().map(|(add, _)| *add).collect()
-   }
-
-   fn query_pod(&mut self, query: &PodState, touched_addresses: &[Address]) -> trie::Result<()> {
-       let pod = query.get();
-
-       for address in touched_addresses {
-           if !self.ensure_cached(address, RequireCache::Code, true, |a| a.is_some())? {
-               continue
-           }
-
-           if let Some(pod_account) = pod.get(address) {
-               // needs to be split into two parts for the refcell code here
-               // to work.
-               for key in pod_account.storage.keys() {
-                   self.storage_at(address, key)?;
-               }
-           }
-       }
-
-       Ok(())
+   /// Populate a PodAccount map from this state, with another state as the account and storage query.
+   pub fn to_pod_diff<X: Backend>(&mut self, query: &State<X>) -> trie::Result<PodState> {
+       assert!(self.checkpoints.borrow().is_empty());
+
+       // Merge PodAccount::to_pod for cache of self and `query`.
+       let all_addresses = self.cache.borrow().keys().cloned()
+           .chain(query.cache.borrow().keys().cloned())
+           .collect::<BTreeSet<_>>();
+
+       Ok(PodState::from(all_addresses.into_iter().fold(Ok(BTreeMap::new()), |m: trie::Result<_>, address| {
+           let mut m = m?;
+
+           let account = self.ensure_cached(&address, RequireCache::Code, true, |acc| {
+               acc.map(|acc| {
+                   // Merge all modified storage keys.
+                   let all_keys = {
+                       let self_keys = acc.storage_changes().keys().cloned()
+                           .collect::<BTreeSet<_>>();
+
+                       if let Some(ref query_storage) = query.cache.borrow().get(&address)
+                           .and_then(|opt| {
+                               Some(opt.account.as_ref()?.storage_changes().keys().cloned()
+                                   .collect::<BTreeSet<_>>())
+                           })
+                       {
+                           self_keys.union(&query_storage).cloned().collect::<Vec<_>>()
+                       } else {
+                           self_keys.into_iter().collect::<Vec<_>>()
+                       }
+                   };
+
+                   // Storage must be fetched after ensure_cached to avoid borrow problem.
+                   (*acc.balance(), *acc.nonce(), all_keys, acc.code().map(|x| x.to_vec()))
+               })
+           })?;
+
+           if let Some((balance, nonce, storage_keys, code)) = account {
+               let storage = storage_keys.into_iter().fold(Ok(BTreeMap::new()), |s: trie::Result<_>, key| {
+                   let mut s = s?;
+                   s.insert(key, self.storage_at(&address, &key)?);
+                   Ok(s)
+               })?;
+
+               m.insert(address, PodAccount {
+                   balance, nonce, storage, code
+               });
+           }
+
+           Ok(m)
+       })?))
    }

    /// Returns a `StateDiff` describing the difference from `orig` to `self`.
    /// Consumes self.
-   pub fn diff_from<X: Backend>(&self, orig: State<X>) -> trie::Result<StateDiff> {
-       let addresses_post = self.touched_addresses();
+   pub fn diff_from<X: Backend>(&self, mut orig: State<X>) -> trie::Result<StateDiff> {
        let pod_state_post = self.to_pod();
-       let mut state_pre = orig;
-       state_pre.query_pod(&pod_state_post, &addresses_post)?;
-       Ok(pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post))
+       let pod_state_pre = orig.to_pod_diff(self)?;
+       Ok(pod_state::diff_pod(&pod_state_pre, &pod_state_post))
    }

    // load required account data from the databases.
@@ -2243,9 +2268,6 @@ mod tests {
        let original = state.clone();
        state.kill_account(&a);

-       assert_eq!(original.touched_addresses(), vec![]);
-       assert_eq!(state.touched_addresses(), vec![a]);
-
        let diff = state.diff_from(original).unwrap();
        let diff_map = diff.get();
        assert_eq!(diff_map.len(), 1);
@@ -2258,4 +2280,42 @@ mod tests {
            storage: Default::default()
        }), None).as_ref());
    }
+
+   #[test]
+   fn should_trace_diff_unmodified_storage() {
+       use pod_account;
+
+       let a = 10.into();
+       let db = get_temp_state_db();
+
+       let (root, db) = {
+           let mut state = State::new(db, U256::from(0), Default::default());
+           state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(20u64))).unwrap();
+           state.commit().unwrap();
+           state.drop()
+       };
+
+       let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
+       let original = state.clone();
+       state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(100u64))).unwrap();
+
+       let diff = state.diff_from(original).unwrap();
+       let diff_map = diff.get();
+       assert_eq!(diff_map.len(), 1);
+       assert!(diff_map.get(&a).is_some());
+       assert_eq!(diff_map.get(&a),
+           pod_account::diff_pod(Some(&PodAccount {
+               balance: U256::zero(),
+               nonce: U256::zero(),
+               code: Some(Default::default()),
+               storage: vec![(H256::from(&U256::from(1u64)), H256::from(&U256::from(20u64)))]
+                   .into_iter().collect(),
+           }), Some(&PodAccount {
+               balance: U256::zero(),
+               nonce: U256::zero(),
+               code: Some(Default::default()),
+               storage: vec![(H256::from(&U256::from(1u64)), H256::from(&U256::from(100u64)))]
+                   .into_iter().collect(),
+           })).as_ref());
+   }
 }

---

@@ -36,6 +36,7 @@ use views::BlockView;
 use ethkey::KeyPair;
 use transaction::{PendingTransaction, Transaction, Action, Condition};
 use miner::MinerService;
+use rlp::{RlpStream, EMPTY_LIST_RLP};
 use tempdir::TempDir;

 #[test]
@@ -111,6 +112,25 @@ fn imports_good_block() {
    assert!(!block.into_inner().is_empty());
 }

+#[test]
+fn fails_to_import_block_with_invalid_rlp() {
+   use error::{BlockImportError, BlockImportErrorKind};
+
+   let client = generate_dummy_client(6);
+   let mut rlp = RlpStream::new_list(3);
+   rlp.append_raw(&EMPTY_LIST_RLP, 1); // empty header
+   rlp.append_raw(&EMPTY_LIST_RLP, 1);
+   rlp.append_raw(&EMPTY_LIST_RLP, 1);
+   let invalid_header_block = rlp.out();
+
+   match client.import_block(invalid_header_block) {
+       Err(BlockImportError(BlockImportErrorKind::Decoder(_), _)) => (), // all good
+       Err(_) => panic!("Should fail with a decoder error"),
+       Ok(_) => panic!("Should not import block with invalid header"),
+   }
+}
+
 #[test]
 fn query_none_block() {
    let tempdir = TempDir::new("").unwrap();

---

@@ -119,14 +119,13 @@ pub mod blocks {

    impl Unverified {
        /// Create an `Unverified` from raw bytes.
-       pub fn new(bytes: Bytes) -> Self {
-           use views::BlockView;
-
-           let header = view!(BlockView, &bytes).header();
-           Unverified {
+       pub fn from_rlp(bytes: Bytes) -> Result<Self, ::rlp::DecoderError> {
+           let header = ::rlp::Rlp::new(&bytes).val_at(0)?;
+           Ok(Unverified {
                header: header,
                bytes: bytes,
-           }
+           })
        }
    }

---

@@ -734,6 +734,7 @@ mod tests {
    use test_helpers::{get_good_dummy_block_seq, get_good_dummy_block};
    use error::*;
    use views::BlockView;
+   use bytes::Bytes;

    // create a test block queue.
    // auto_scaling enables verifier adjustment.
@@ -746,6 +747,10 @@ mod tests {
        BlockQueue::new(config, engine, IoChannel::disconnected(), true)
    }

+   fn new_unverified(bytes: Bytes) -> Unverified {
+       Unverified::from_rlp(bytes).expect("Should be valid rlp")
+   }
+
    #[test]
    fn can_be_created() {
        // TODO better test
@@ -757,7 +762,7 @@ mod tests {
    #[test]
    fn can_import_blocks() {
        let queue = get_test_queue(false);
-       if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
+       if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) {
            panic!("error importing block that is valid by definition({:?})", e);
        }
    }
@@ -765,11 +770,11 @@ mod tests {
    #[test]
    fn returns_error_for_duplicates() {
        let queue = get_test_queue(false);
-       if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
+       if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) {
            panic!("error importing block that is valid by definition({:?})", e);
        }

-       let duplicate_import = queue.import(Unverified::new(get_good_dummy_block()));
+       let duplicate_import = queue.import(new_unverified(get_good_dummy_block()));
        match duplicate_import {
            Err(e) => {
                match e {
@@ -786,7 +791,7 @@ mod tests {
        let queue = get_test_queue(false);
        let block = get_good_dummy_block();
        let hash = view!(BlockView, &block).header().hash().clone();
-       if let Err(e) = queue.import(Unverified::new(block)) {
+       if let Err(e) = queue.import(new_unverified(block)) {
            panic!("error importing block that is valid by definition({:?})", e);
        }
        queue.flush();
@@ -802,14 +807,14 @@ mod tests {
        let queue = get_test_queue(false);
        let block = get_good_dummy_block();
        let hash = view!(BlockView, &block).header().hash().clone();
-       if let Err(e) = queue.import(Unverified::new(block)) {
+       if let Err(e) = queue.import(new_unverified(block)) {
            panic!("error importing block that is valid by definition({:?})", e);
        }
        queue.flush();
        queue.drain(10);
        queue.mark_as_good(&[ hash ]);

-       if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
+       if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) {
            panic!("error importing block that has already been drained ({:?})", e);
        }
    }
@@ -817,7 +822,7 @@ mod tests {
    #[test]
    fn returns_empty_once_finished() {
        let queue = get_test_queue(false);
-       queue.import(Unverified::new(get_good_dummy_block()))
+       queue.import(new_unverified(get_good_dummy_block()))
            .expect("error importing block that is valid by definition");
        queue.flush();
        queue.drain(1);
@@ -835,7 +840,7 @@ mod tests {
        assert!(!queue.queue_info().is_full());
        let mut blocks = get_good_dummy_block_seq(50);
        for b in blocks.drain(..) {
-           queue.import(Unverified::new(b)).unwrap();
+           queue.import(new_unverified(b)).unwrap();
        }
        assert!(queue.queue_info().is_full());
    }
@@ -863,7 +868,7 @@ mod tests {
        *queue.state.0.lock() = State::Work(0);

        for block in get_good_dummy_block_seq(5000) {
-           queue.import(Unverified::new(block)).expect("Block good by definition; qed");
+           queue.import(new_unverified(block)).expect("Block good by definition; qed");
        }

        // almost all unverified == bump verifier count.

---

@@ -476,7 +476,7 @@ mod tests {
        unimplemented!()
    }

-   fn logs<F>(&self, _blocks: Vec<BlockNumber>, _matches: F, _limit: Option<usize>) -> Vec<LocalizedLogEntry>
+   fn logs<F>(&self, _blocks: Vec<H256>, _matches: F, _limit: Option<usize>) -> Vec<LocalizedLogEntry>
        where F: Fn(&LogEntry) -> bool, Self: Sized {
        unimplemented!()
    }

---

@@ -12,4 +12,4 @@ libc = "0.2"
 pwasm-utils = "0.1"
 vm = { path = "../vm" }
 ethcore-logger = { path = "../../logger" }
-wasmi = { version = "0.1.3", features = ["opt-in-32bit"] }
+wasmi = { version = "0.2" }

---

@@ -33,7 +33,7 @@ use ethcore::snapshot::service::Service as SnapshotService;
 use sync::{LightSyncProvider, LightSync, SyncProvider, ManageNetwork};
 use io::{TimerToken, IoContext, IoHandler};
 use light::Cache as LightDataCache;
-use light::client::LightChainClient;
+use light::client::{LightChainClient, LightChainNotify};
 use number_prefix::{binary_prefix, Standalone, Prefixed};
 use parity_rpc::{is_major_importing};
 use parity_rpc::informant::RpcStats;
@@ -395,6 +395,33 @@ impl ChainNotify for Informant<FullNodeInformantData> {
    }
 }

+impl LightChainNotify for Informant<LightNodeInformantData> {
+   fn new_headers(&self, good: &[H256]) {
+       let mut last_import = self.last_import.lock();
+       let client = &self.target.client;
+
+       let importing = self.target.is_major_importing();
+       let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing;
+
+       if ripe {
+           if let Some(header) = good.last().and_then(|h| client.block_header(BlockId::Hash(*h))) {
+               info!(target: "import", "Imported {} {} ({} Mgas){}",
+                   Colour::White.bold().paint(format!("#{}", header.number())),
+                   Colour::White.bold().paint(format!("{}", header.hash())),
+                   Colour::Yellow.bold().paint(format!("{:.2}", header.gas_used().low_u64() as f32 / 1000000f32)),
+                   if good.len() > 1 {
+                       format!(" + another {} header(s)",
+                           Colour::Red.bold().paint(format!("{}", good.len() - 1)))
+                   } else {
+                       String::new()
+                   }
+               );
+               *last_import = Instant::now();
+           }
+       }
+   }
+}
+
 const INFO_TIMER: TimerToken = 0;

 impl<T: InformantData> IoHandler<ClientIoMessage> for Informant<T> {

---

@@ -412,7 +412,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
        Some(rpc_stats),
        cmd.logger_config.color,
    ));
-
+   service.add_notify(informant.clone());
    service.register_handler(informant.clone()).map_err(|_| "Unable to register informant handler".to_owned())?;

    Ok(RunningClient::Light {

---

@@ -105,10 +105,13 @@ pub struct NetworkContext<'s> {

 impl<'s> NetworkContext<'s> {
    /// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler.
-   fn new(io: &'s IoContext<NetworkIoMessage>,
+   fn new(
+       io: &'s IoContext<NetworkIoMessage>,
        protocol: ProtocolId,
-       session: Option<SharedSession>, sessions: Arc<RwLock<Slab<SharedSession>>>,
-       reserved_peers: &'s HashSet<NodeId>) -> NetworkContext<'s> {
+       session: Option<SharedSession>,
+       sessions: Arc<RwLock<Slab<SharedSession>>>,
+       reserved_peers: &'s HashSet<NodeId>,
+   ) -> NetworkContext<'s> {
        let id = session.as_ref().map(|s| s.lock().token());
        NetworkContext {
            io: io,
@@ -585,10 +588,8 @@ impl Host {
        let address = {
            let mut nodes = self.nodes.write();
            if let Some(node) = nodes.get_mut(id) {
-               node.attempts += 1;
                node.endpoint.address
-           }
-           else {
+           } else {
                debug!(target: "network", "Connection to expired node aborted");
                return;
            }
@@ -600,6 +601,7 @@ impl Host {
            },
            Err(e) => {
                debug!(target: "network", "{}: Can't connect to address {:?}: {:?}", id, address, e);
+               self.nodes.write().note_failure(&id);
                return;
            }
        }
@@ -685,10 +687,12 @@ impl Host {
            Err(e) => {
                let s = session.lock();
                trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
-               if let ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) = *e.kind() {
+               if let ErrorKind::Disconnect(DisconnectReason::UselessPeer) = *e.kind() {
                    if let Some(id) = s.id() {
                        if !self.reserved_nodes.read().contains(id) {
-                           self.nodes.write().mark_as_useless(id);
+                           let mut nodes = self.nodes.write();
+                           nodes.note_failure(&id);
+                           nodes.mark_as_useless(id);
                        }
                    }
                }
@@ -754,6 +758,10 @@ impl Host {
                    }
                }
            }
+
+           // Note connection success
+           self.nodes.write().note_success(&id);
+
            for (p, _) in self.handlers.read().iter() {
                if s.have_capability(*p) {
                    ready_data.push(*p);
@@ -1024,7 +1032,9 @@ impl IoHandler<NetworkIoMessage> for Host {
                if let Some(session) = session {
                    session.lock().disconnect(io, DisconnectReason::DisconnectRequested);
                    if let Some(id) = session.lock().id() {
-                       self.nodes.write().mark_as_useless(id)
+                       let mut nodes = self.nodes.write();
+                       nodes.note_failure(&id);
+                       nodes.mark_as_useless(id);
                    }
                }
                trace!(target: "network", "Disabling peer {}", peer);

---

@@ -21,6 +21,8 @@ use std::net::{SocketAddr, ToSocketAddrs, SocketAddrV4, SocketAddrV6, Ipv4Addr,
 use std::path::PathBuf;
 use std::str::FromStr;
 use std::{fs, mem, slice};
+use std::time::{self, Duration, SystemTime};
+use rand::{self, Rng};
 use ethereum_types::H512;
 use rlp::{Rlp, RlpStream, DecoderError};
 use network::{Error, ErrorKind, AllowIP, IpFilter};
@@ -128,40 +130,64 @@ impl FromStr for NodeEndpoint {
    }
 }

-#[derive(PartialEq, Eq, Copy, Clone)]
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub enum PeerType {
    _Required,
    Optional
 }

+/// A type for representing an interaction (contact) with a node at a given time
+/// that was either a success or a failure.
+#[derive(Clone, Copy, Debug)]
+pub enum NodeContact {
+   Success(SystemTime),
+   Failure(SystemTime),
+}
+
+impl NodeContact {
+   fn success() -> NodeContact {
+       NodeContact::Success(SystemTime::now())
+   }
+
+   fn failure() -> NodeContact {
+       NodeContact::Failure(SystemTime::now())
+   }
+
+   fn time(&self) -> SystemTime {
+       match *self {
+           NodeContact::Success(t) | NodeContact::Failure(t) => t
+       }
+   }
+
+   /// Filters out an old contact, returning `None` if it happened more than a
+   /// week ago.
+   fn recent(&self) -> Option<&NodeContact> {
+       let t = self.time();
+       if let Ok(d) = t.elapsed() {
+           if d < Duration::from_secs(60 * 60 * 24 * 7) {
+               return Some(self);
+           }
+       }
+       None
+   }
+}
+
+#[derive(Debug)]
 pub struct Node {
    pub id: NodeId,
    pub endpoint: NodeEndpoint,
    pub peer_type: PeerType,
-   pub attempts: u32,
-   pub failures: u32,
+   pub last_contact: Option<NodeContact>,
 }

-const DEFAULT_FAILURE_PERCENTAGE: usize = 50;
-
 impl Node {
    pub fn new(id: NodeId, endpoint: NodeEndpoint) -> Node {
        Node {
            id: id,
            endpoint: endpoint,
            peer_type: PeerType::Optional,
-           attempts: 0,
-           failures: 0,
-       }
-   }
-
-   /// Returns the node's failure percentage (0..100) in buckets of 5%. If there are 0 connection attempts for this
-   /// node the default failure percentage is returned (50%).
-   pub fn failure_percentage(&self) -> usize {
-       if self.attempts == 0 {
-           DEFAULT_FAILURE_PERCENTAGE
-       } else {
-           (self.failures * 100 / self.attempts / 5 * 5) as usize
+           last_contact: None,
        }
    }
 }
@@ -191,8 +217,7 @@ impl FromStr for Node {
            id: id,
            endpoint: endpoint,
            peer_type: PeerType::Optional,
-           attempts: 0,
-           failures: 0,
+           last_contact: None,
        })
    }
 }
@@ -231,28 +256,61 @@ impl NodeTable {
    /// Add a node to table
    pub fn add_node(&mut self, mut node: Node) {
-       // preserve attempts and failure counter
-       let (attempts, failures) =
-           self.nodes.get(&node.id).map_or((0, 0), |n| (n.attempts, n.failures));
-
-       node.attempts = attempts;
-       node.failures = failures;
-
+       // preserve node last_contact
+       node.last_contact = self.nodes.get(&node.id).and_then(|n| n.last_contact);
        self.nodes.insert(node.id.clone(), node);
    }

+   /// Returns a list of ordered nodes according to their most recent contact
+   /// and filtering useless nodes. The algorithm for creating the sorted nodes
+   /// is:
+   /// - Contacts that aren't recent (older than 1 week) are discarded
+   /// - (1) Nodes with a successful contact are ordered (most recent success first)
+   /// - (2) Nodes with unknown contact (older than 1 week or new nodes) are randomly shuffled
+   /// - (3) Nodes with a failed contact are ordered (oldest failure first)
+   /// - The final result is the concatenation of (1), (2) and (3)
    fn ordered_entries(&self) -> Vec<&Node> {
-       let mut refs: Vec<&Node> = self.nodes.values()
-           .filter(|n| !self.useless_nodes.contains(&n.id))
-           .collect();
-
-       refs.sort_by(|a, b| {
-           a.failure_percentage().cmp(&b.failure_percentage())
-               .then_with(|| a.failures.cmp(&b.failures))
-               .then_with(|| b.attempts.cmp(&a.attempts)) // we use reverse ordering for number of attempts
-       });
-
-       refs
+       let mut success = Vec::new();
+       let mut failures = Vec::new();
+       let mut unknown = Vec::new();
+
+       let nodes = self.nodes.values()
+           .filter(|n| !self.useless_nodes.contains(&n.id));
+
+       for node in nodes {
+           // discard contact points that aren't recent
+           match node.last_contact.as_ref().and_then(|c| c.recent()) {
+               Some(&NodeContact::Success(_)) => {
+                   success.push(node);
+               },
+               Some(&NodeContact::Failure(_)) => {
+                   failures.push(node);
+               },
+               None => {
+                   unknown.push(node);
+               },
+           }
+       }
+
+       success.sort_by(|a, b| {
+           let a = a.last_contact.expect("vector only contains values with defined last_contact; qed");
+           let b = b.last_contact.expect("vector only contains values with defined last_contact; qed");
+           // inverse ordering, most recent successes come first
+           b.time().cmp(&a.time())
+       });
+
+       failures.sort_by(|a, b| {
+           let a = a.last_contact.expect("vector only contains values with defined last_contact; qed");
+           let b = b.last_contact.expect("vector only contains values with defined last_contact; qed");
+           // normal ordering, most distant failures come first
+           a.time().cmp(&b.time())
+       });
+
+       rand::thread_rng().shuffle(&mut unknown);
+
+       success.append(&mut unknown);
+       success.append(&mut failures);
+
+       success
    }

    /// Returns node ids sorted by failure percentage, for nodes with the same failure percentage the absolute number of
@@ -296,10 +354,17 @@ impl NodeTable {
        }
    }

-   /// Increase failure counter for a node
+   /// Set last contact as failure for a node
    pub fn note_failure(&mut self, id: &NodeId) {
        if let Some(node) = self.nodes.get_mut(id) {
-           node.failures += 1;
+           node.last_contact = Some(NodeContact::failure());
+       }
+   }
+
+   /// Set last contact as success for a node
+   pub fn note_success(&mut self, id: &NodeId) {
+       if let Some(node) = self.nodes.get_mut(id) {
+           node.last_contact = Some(NodeContact::success());
        }
    }
@@ -396,19 +461,38 @@ mod json {
        pub nodes: Vec<Node>,
    }

+   #[derive(Serialize, Deserialize)]
+   pub enum NodeContact {
+       #[serde(rename = "success")]
+       Success(u64),
+       #[serde(rename = "failure")]
+       Failure(u64),
+   }
+
+   impl NodeContact {
+       pub fn into_node_contact(self) -> super::NodeContact {
+           match self {
+               NodeContact::Success(s) => super::NodeContact::Success(
+                   time::UNIX_EPOCH + Duration::from_secs(s)
+               ),
+               NodeContact::Failure(s) => super::NodeContact::Failure(
+                   time::UNIX_EPOCH + Duration::from_secs(s)
+               ),
+           }
+       }
+   }
+
    #[derive(Serialize, Deserialize)]
    pub struct Node {
        pub url: String,
-       pub attempts: u32,
-       pub failures: u32,
+       pub last_contact: Option<NodeContact>,
    }

    impl Node {
        pub fn into_node(self) -> Option<super::Node> {
            match super::Node::from_str(&self.url) {
                Ok(mut node) => {
-                   node.attempts = self.attempts;
-                   node.failures = self.failures;
+                   node.last_contact = self.last_contact.map(|c| c.into_node_contact());
                    Some(node)
                },
                _ => None,
@@ -418,10 +502,18 @@ mod json {

    impl<'a> From<&'a super::Node> for Node {
        fn from(node: &'a super::Node) -> Self {
+           let last_contact = node.last_contact.and_then(|c| {
+               match c {
+                   super::NodeContact::Success(t) =>
+                       t.duration_since(time::UNIX_EPOCH).ok().map(|d| NodeContact::Success(d.as_secs())),
+                   super::NodeContact::Failure(t) =>
+                       t.duration_since(time::UNIX_EPOCH).ok().map(|d| NodeContact::Failure(d.as_secs())),
+               }
+           });
+
            Node {
                url: format!("{}", node),
-               attempts: node.attempts,
-               failures: node.failures,
+               last_contact,
            }
        }
    }
@@ -464,42 +556,54 @@ mod tests {
    }

    #[test]
-   fn table_failure_percentage_order() {
+   fn table_last_contact_order() {
        let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
        let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
        let node3 = Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
        let node4 = Node::from_str("enode://d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
+       let node5 = Node::from_str("enode://e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
+       let node6 = Node::from_str("enode://f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
        let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
        let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
        let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
        let id4 = H512::from_str("d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
+       let id5 = H512::from_str("e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
+       let id6 = H512::from_str("f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
        let mut table = NodeTable::new(None);
        table.add_node(node1);
        table.add_node(node2);
        table.add_node(node3);
        table.add_node(node4);
+       table.add_node(node5);
+       table.add_node(node6);

-       // node 1 - failure percentage 100%
-       table.get_mut(&id1).unwrap().attempts = 2;
+       // failures - nodes 1 & 2
        table.note_failure(&id1);
-       table.note_failure(&id1);
-
-       // node2 - failure percentage 33%
-       table.get_mut(&id2).unwrap().attempts = 3;
        table.note_failure(&id2);

-       // node3 - failure percentage 0%
-       table.get_mut(&id3).unwrap().attempts = 1;
+       // success - nodes 3 & 4
+       table.note_success(&id3);
+       table.note_success(&id4);

-       // node4 - failure percentage 50% (default when no attempts)
+       // success - node 5 (old contact)
+       table.get_mut(&id5).unwrap().last_contact = Some(NodeContact::Success(time::UNIX_EPOCH));
+
+       // unknown - node 6

        let r = table.nodes(IpFilter::default());

-       assert_eq!(r[0][..], id3[..]);
-       assert_eq!(r[1][..], id2[..]);
-       assert_eq!(r[2][..], id4[..]);
-       assert_eq!(r[3][..], id1[..]);
+       assert_eq!(r[0][..], id4[..]); // most recent success
+       assert_eq!(r[1][..], id3[..]);
+
+       // unknown (old contacts and new nodes), randomly shuffled
+       assert!(
+           r[2][..] == id5[..] && r[3][..] == id6[..] ||
+           r[2][..] == id6[..] && r[3][..] == id5[..]
+       );
+
+       assert_eq!(r[4][..], id1[..]); // oldest failure
+       assert_eq!(r[5][..], id2[..]);
    }

    #[test]
@@ -507,23 +611,27 @@ mod tests {
        let tempdir = TempDir::new("").unwrap();
        let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
        let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
+       let node3 = Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
        let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
        let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
+       let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
        {
            let mut table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned()));
            table.add_node(node1);
            table.add_node(node2);
+           table.add_node(node3);

-           table.get_mut(&id1).unwrap().attempts = 1;
-           table.get_mut(&id2).unwrap().attempts = 1;
-           table.note_failure(&id2);
+           table.note_success(&id2);
+           table.note_failure(&id3);
        }

        {
            let table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned()));
            let r = table.nodes(IpFilter::default());
-           assert_eq!(r[0][..], id1[..]);
-           assert_eq!(r[1][..], id2[..]);
+           assert_eq!(r[0][..], id2[..]); // latest success
+           assert_eq!(r[1][..], id1[..]); // unknown
+           assert_eq!(r[2][..], id3[..]); // oldest failure
        }
    }
} }