Merge branch 'master' of github.com:ethcore/parity into docker
Commit b9d2b42050

.gitmodules (vendored, 4 lines changed):
@@ -1,4 +1,4 @@
 [submodule "ethcore/res/ethereum/tests"]
     path = ethcore/res/ethereum/tests
-    url = git@github.com:ethereum/tests
+    url = https://github.com/ethereum/tests.git
     branch = develop

@@ -42,7 +42,7 @@ after_success: |
   ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore-* &&
   ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethsync-* &&
   ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore_rpc-* &&
-  ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${COVERALLS_TOKEN} --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/parity-* &&
+  ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${TRAVIS_JOB_ID} --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/parity-* &&
   [ $TRAVIS_BRANCH = master ] &&
   [ $TRAVIS_PULL_REQUEST = false ] &&
   [ $TRAVIS_RUST_VERSION = nightly ] &&

README.md (17 lines changed):
@@ -2,20 +2,22 @@
 
 [![Build Status][travis-image]][travis-url] [![Coverage Status][coveralls-image]][coveralls-url] [![Join the chat at https://gitter.im/trogdoro/xiki][gitter-image]][gitter-url]
 
-[travis-image]: https://travis-ci.com/ethcore/parity.svg?token=DMFvZu71iaTbUYx9UypX&branch=master
-[travis-url]: https://travis-ci.com/ethcore/parity
-[coveralls-image]: https://coveralls.io/repos/github/ethcore/parity/badge.svg?branch=master&t=Fk0OuQ
-[coveralls-url]: https://coveralls.io/r/ethcore/parity?branch=master
+[travis-image]: https://travis-ci.org/ethcore/parity.svg?branch=master
+[travis-url]: https://travis-ci.org/ethcore/parity
+[coveralls-image]: https://coveralls.io/repos/github/ethcore/parity/badge.svg?branch=master
+[coveralls-url]: https://coveralls.io/github/ethcore/parity?branch=master
 [gitter-image]: https://badges.gitter.im/Join%20Chat.svg
 [gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
 
+[Documentation](http://ethcore.github.io/parity/ethcore/index.html)
 
 ### Building from source
 
-##### Ubuntu 14.04
+##### Ubuntu 14.04, 15.04, 15.10
 
 ```bash
 # install rocksdb
-add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main"
+add-apt-repository ppa:ethcore/ethcore
 apt-get update
 apt-get install -y --force-yes librocksdb
 
@@ -32,7 +34,7 @@ cd parity
 cargo build --release
 ```
 
-##### Linux
+##### Other Linux
 
 ```bash
 # install rocksdb
 
@@ -72,3 +74,4 @@ git clone https://github.com/ethcore/parity
 cd parity
 cargo build --release
 ```
+

@@ -29,7 +29,8 @@
     "nodes": [
         "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
         "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303",
-        "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"
+        "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
+        "enode://859bbe6926fc161d218f62bd2efe0b4f6980205c00a5b928ccee39c94c440b73a054ece5db36beddd71963fbd296af61ec72a591f72a2299f9a046bd6d6ce1a9@parity-node-zero.ethcore.io:30303"
     ],
     "accounts": {
         "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "linear": { "base": 3000, "word": 0 } } },

@@ -30,8 +30,6 @@ use client::BlockStatus;
 /// Block queue status
 #[derive(Debug)]
 pub struct BlockQueueInfo {
-    /// Indicates that queue is full
-    pub full: bool,
     /// Number of queued blocks pending verification
    pub unverified_queue_size: usize,
     /// Number of verified queued blocks pending import
@@ -46,6 +44,16 @@ impl BlockQueueInfo {
 
     /// The size of the unverified and verifying queues.
     pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size }
 
+    /// Indicates that queue is full
+    pub fn is_full(&self) -> bool {
+        self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > MAX_UNVERIFIED_QUEUE_SIZE
+    }
+
+    /// Indicates that queue is empty
+    pub fn is_empty(&self) -> bool {
+        self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0
+    }
 }
 
 /// A queue of blocks. Sits between network or other I/O and the BlockChain.

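A side note on the change above: the precomputed `full: bool` field gives way to predicates computed on demand from the three queue counters, so callers can no longer read a stale flag. A minimal standalone sketch of the same predicate logic, with the size limit as a local assumption rather than the crate's actual constant:

```rust
// Self-contained model of the BlockQueueInfo predicates; the limit is
// an assumed value for illustration, not parity's real constant.
const MAX_UNVERIFIED_QUEUE_SIZE: usize = 50_000;

struct QueueInfo {
    unverified: usize,
    verifying: usize,
    verified: usize,
}

impl QueueInfo {
    // total backlog across all three verification stages
    fn total(&self) -> usize { self.unverified + self.verifying + self.verified }
    fn is_full(&self) -> bool { self.total() > MAX_UNVERIFIED_QUEUE_SIZE }
    fn is_empty(&self) -> bool { self.total() == 0 }
}

fn main() {
    let idle = QueueInfo { unverified: 0, verifying: 0, verified: 0 };
    assert!(idle.is_empty() && !idle.is_full());
}
```
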
@@ -205,6 +213,8 @@ impl BlockQueue {
         let mut verification = self.verification.lock().unwrap();
         verification.unverified.clear();
         verification.verifying.clear();
+        verification.verified.clear();
+        self.processing.write().unwrap().clear();
     }
 
     /// Wait for queue to be empty
@@ -285,7 +295,6 @@ impl BlockQueue {
         for h in hashes {
             processing.remove(&h);
         }
-        //TODO: reward peers
     }
 
     /// Removes up to `max` verified blocks from the queue
@@ -308,7 +317,6 @@ impl BlockQueue {
     pub fn queue_info(&self) -> BlockQueueInfo {
         let verification = self.verification.lock().unwrap();
         BlockQueueInfo {
-            full: verification.unverified.len() + verification.verifying.len() + verification.verified.len() >= MAX_UNVERIFIED_QUEUE_SIZE,
             verified_queue_size: verification.verified.len(),
             unverified_queue_size: verification.unverified.len(),
             verifying_queue_size: verification.verifying.len(),
@@ -393,4 +401,14 @@ mod tests {
             panic!("error importing block that has already been drained ({:?})", e);
         }
     }
 
+    #[test]
+    fn returns_empty_once_finished() {
+        let mut queue = get_test_queue();
+        queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition");
+        queue.flush();
+        queue.drain(1);
+
+        assert!(queue.queue_info().is_empty());
+    }
 }

@@ -27,7 +27,7 @@ use spec::Spec;
 use engine::Engine;
 use views::HeaderView;
 use block_queue::{BlockQueue, BlockQueueInfo};
-use service::NetSyncMessage;
+use service::{NetSyncMessage, SyncMessage};
 use env_info::LastHashes;
 use verification::*;
 use block::*;
@@ -67,7 +67,6 @@ impl fmt::Display for BlockChainInfo {
     }
 }
 
-
 /// Blockchain database client. Owns and manages a blockchain and a block queue.
 pub trait BlockChainClient : Sync + Send {
     /// Get raw block header data by block header hash.
@@ -155,8 +154,7 @@ impl ClientReport {
 pub struct Client {
     chain: Arc<RwLock<BlockChain>>,
     engine: Arc<Box<Engine>>,
-    state_db: Arc<DB>,
-    state_journal: Mutex<JournalDB>,
+    state_db: Mutex<JournalDB>,
     block_queue: RwLock<BlockQueue>,
     report: RwLock<ClientReport>,
     import_lock: Mutex<()>
@@ -209,8 +207,7 @@ impl Client {
         Ok(Arc::new(Client {
             chain: chain,
             engine: engine.clone(),
-            state_db: db.clone(),
-            state_journal: Mutex::new(JournalDB::new_with_arc(db)),
+            state_db: Mutex::new(state_db),
             block_queue: RwLock::new(BlockQueue::new(engine, message_channel)),
             report: RwLock::new(Default::default()),
             import_lock: Mutex::new(()),
@@ -223,7 +220,7 @@ impl Client {
     }
 
     /// This is triggered by a message coming from a block queue when the block is ready for insertion
-    pub fn import_verified_blocks(&self, _io: &IoChannel<NetSyncMessage>) -> usize {
+    pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize {
         let mut ret = 0;
         let mut bad = HashSet::new();
         let _import_lock = self.import_lock.lock();
@@ -265,7 +262,7 @@ impl Client {
             }
         }
 
-        let db = self.state_journal.lock().unwrap().clone();
+        let db = self.state_db.lock().unwrap().clone();
         let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) {
             Ok(b) => b,
             Err(e) => {
@@ -295,6 +292,10 @@ impl Client {
             self.report.write().unwrap().accrue_block(&block);
             trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
             ret += 1;
+
+            if self.block_queue.read().unwrap().queue_info().is_empty() {
+                io.send(NetworkIoMessage::User(SyncMessage::BlockVerified)).unwrap();
+            }
         }
         self.block_queue.write().unwrap().mark_as_good(&good_blocks);
         ret
@@ -302,7 +303,7 @@ impl Client {
 
     /// Get a copy of the best block's state.
     pub fn state(&self) -> State {
-        State::from_existing(JournalDB::new_with_arc(self.state_db.clone()), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
+        State::from_existing(self.state_db.lock().unwrap().clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
     }
 
     /// Get info on the cache.

@@ -570,7 +570,7 @@ function run_installer()
     sudo apt-add-repository -y ppa:ethcore/ethcore
     sudo apt-get -f -y install
     sudo apt-get update -qq
-    sudo apt-get install -qq -y librocksdb
+    sudo apt-get install -qq -y librocksdb-dev
 }
 
 function linux_rocksdb_installer()
@@ -628,8 +628,8 @@ function run_installer()
 
     if [[ $isMultirustNightly == false ]]; then
         info "Installing rust nightly..."
-        sudo multirust update nightly
-        sudo multirust default nightly
+        multirust update nightly
+        multirust default nightly
         echo
     fi
 }

@@ -33,6 +33,7 @@ extern crate fdlimit;
 #[cfg(feature = "rpc")]
 extern crate ethcore_rpc as rpc;
 
+use std::net::{SocketAddr};
 use std::env;
 use rlog::{LogLevelFilter};
 use env_logger::LogBuilder;
@@ -56,11 +57,15 @@ Options:
   -j --jsonrpc             Enable the JSON-RPC API sever.
   --jsonrpc-url URL        Specify URL for JSON-RPC API server [default: 127.0.0.1:8545].
 
+  --listen-address URL     Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304].
+  --public-address URL     Specify the IP/port on which peers may connect [default: 0.0.0.0:30304].
+  --address URL            Equivalent to --listen-address URL --public-address URL.
+
   --cache-pref-size BYTES  Specify the prefered size of the blockchain cache in bytes [default: 16384].
   --cache-max-size BYTES   Specify the maximum size of the blockchain cache in bytes [default: 262144].
 
   -h --help                Show this screen.
-", flag_cache_pref_size: usize, flag_cache_max_size: usize);
+", flag_cache_pref_size: usize, flag_cache_max_size: usize, flag_address: Option<String>);
 
 fn setup_log(init: &str) {
     let mut builder = LogBuilder::new();
@@ -105,6 +110,16 @@ fn main() {
     };
     let mut net_settings = NetworkConfiguration::new();
     net_settings.boot_nodes = init_nodes;
+    match args.flag_address {
+        None => {
+            net_settings.listen_address = SocketAddr::from_str(args.flag_listen_address.as_ref()).expect("Invalid listen address given with --listen-address");
+            net_settings.public_address = SocketAddr::from_str(args.flag_public_address.as_ref()).expect("Invalid public address given with --public-address");
+        }
+        Some(ref a) => {
+            net_settings.public_address = SocketAddr::from_str(a.as_ref()).expect("Invalid listen/public address given with --address");
+            net_settings.listen_address = net_settings.public_address.clone();
+        }
+    }
     let mut service = ClientService::start(spec, net_settings).unwrap();
     let client = service.client().clone();
     client.configure_cache(args.flag_cache_pref_size, args.flag_cache_max_size);

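The new `--address` handling funnels every flag through `SocketAddr::from_str`, so a malformed value fails fast in the `.expect(...)` calls rather than surfacing later in the network stack. A small self-contained check of that parsing behaviour (standard library only; the addresses are just illustrative):

```rust
use std::net::SocketAddr;
use std::str::FromStr;

fn main() {
    // An IP:port pair parses; this mirrors what --address accepts.
    let addr = SocketAddr::from_str("0.0.0.0:30304").unwrap();
    assert_eq!(addr.port(), 30304);

    // A bare IP without a port is rejected, which is what makes the
    // expect() calls in main() trip on malformed input.
    assert!(SocketAddr::from_str("0.0.0.0").is_err());
}
```
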
@@ -14,4 +14,5 @@ clippy = "0.0.37"
 log = "0.3"
 env_logger = "0.3"
 time = "0.1.34"
+rand = "0.3.13"

@@ -62,6 +62,9 @@ const MAX_NODE_DATA_TO_SEND: usize = 1024;
 const MAX_RECEIPTS_TO_SEND: usize = 1024;
 const MAX_HEADERS_TO_REQUEST: usize = 512;
 const MAX_BODIES_TO_REQUEST: usize = 256;
+const MIN_PEERS_PROPAGATION: usize = 4;
+const MAX_PEERS_PROPAGATION: usize = 128;
+const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20;
 
 const STATUS_PACKET: u8 = 0x00;
 const NEW_BLOCK_HASHES_PACKET: u8 = 0x01;
@@ -134,7 +137,7 @@ pub struct SyncStatus {
     pub num_active_peers: usize,
 }
 
-#[derive(PartialEq, Eq, Debug)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 /// Peer data type requested
 enum PeerAsking {
     Nothing,
@@ -142,6 +145,7 @@ enum PeerAsking {
     BlockBodies,
 }
 
+#[derive(Clone)]
 /// Syncing peer information
 struct PeerInfo {
     /// eth protocol version
@@ -430,12 +434,11 @@ impl ChainSync {
         let block_rlp = try!(r.at(0));
         let header_rlp = try!(block_rlp.at(0));
         let h = header_rlp.as_raw().sha3();
-
         trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h);
-        let header_view = HeaderView::new(header_rlp.as_raw());
+        let header: BlockHeader = try!(header_rlp.as_val());
         let mut unknown = false;
         // TODO: Decompose block and add to self.headers and self.bodies instead
-        if header_view.number() == From::from(self.current_base_block() + 1) {
+        if header.number == From::from(self.current_base_block() + 1) {
             match io.chain().import_block(block_rlp.as_raw().to_vec()) {
                 Err(ImportError::AlreadyInChain) => {
                     trace!(target: "sync", "New block already in chain {:?}", h);
@@ -468,7 +471,7 @@ impl ChainSync {
             trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
             {
                 let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer");
-                peer.latest = header_view.sha3();
+                peer.latest = header.hash();
             }
             self.sync_peer(io, peer_id, true);
         }
@@ -593,7 +596,7 @@ impl ChainSync {
     fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) {
         self.clear_peer_download(peer_id);
 
-        if io.chain().queue_info().full {
+        if io.chain().queue_info().is_full() {
             self.pause_sync();
             return;
         }
@@ -636,6 +639,7 @@ impl ChainSync {
                 if start == 0 {
                     self.have_common_block = true; //reached genesis
                     self.last_imported_hash = Some(chain_info.genesis_hash);
+                    self.last_imported_block = Some(0);
                 }
             }
             if self.have_common_block {
@@ -1030,10 +1034,6 @@ impl ChainSync {
         })
     }
 
-    /// Maintain other peers. Send out any new blocks and transactions
-    pub fn _maintain_sync(&mut self, _io: &mut SyncIo) {
-    }
-
     pub fn maintain_peers(&self, io: &mut SyncIo) {
         let tick = time::precise_time_s();
         for (peer_id, peer) in &self.peers {
@@ -1042,13 +1042,125 @@ impl ChainSync {
             }
         }
     }
-    /// Maintain other peers. Send out any new blocks and transactions
-    pub fn maintain_sync(&mut self, io: &mut SyncIo) {
-        if !io.chain().queue_info().full && self.state == SyncState::Waiting {
+    fn check_resume(&mut self, io: &mut SyncIo) {
+        if !io.chain().queue_info().is_full() && self.state == SyncState::Waiting {
             self.state = SyncState::Idle;
             self.continue_sync(io);
         }
     }
+
+    /// creates rlp to send for the tree defined by 'from' and 'to' hashes
+    fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> {
+        match chain.tree_route(from, to) {
+            Some(route) => {
+                match route.blocks.len() {
+                    0 => None,
+                    _ => {
+                        let mut rlp_stream = RlpStream::new_list(route.blocks.len());
+                        for block_hash in route.blocks {
+                            let mut hash_rlp = RlpStream::new_list(2);
+                            let difficulty = chain.block_total_difficulty(&block_hash).expect("Mallformed block without a difficulty on the chain!");
+                            hash_rlp.append(&block_hash);
+                            hash_rlp.append(&difficulty);
+                            rlp_stream.append_raw(&hash_rlp.out(), 1);
+                        }
+                        Some(rlp_stream.out())
+                    }
+                }
+            },
+            None => None
+        }
+    }
+
+    /// creates latest block rlp for the given client
+    fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
+        let mut rlp_stream = RlpStream::new_list(2);
+        rlp_stream.append_raw(&chain.block(&chain.chain_info().best_block_hash).expect("Creating latest block when there is none"), 1);
+        rlp_stream.append(&chain.chain_info().total_difficulty);
+        rlp_stream.out()
+    }
+
+    /// returns peer ids that have less blocks than our chain
+    fn get_lagging_peers(&self, io: &SyncIo) -> Vec<PeerId> {
+        let chain = io.chain();
+        let chain_info = chain.chain_info();
+        let latest_hash = chain_info.best_block_hash;
+        let latest_number = chain_info.best_block_number;
+        self.peers.iter().filter(|&(_, peer_info)|
+            match io.chain().block_status(&peer_info.latest)
+            {
+                BlockStatus::InChain => {
+                    let peer_number = HeaderView::new(&io.chain().block_header(&peer_info.latest).unwrap()).number();
+                    peer_info.latest != latest_hash && latest_number > peer_number && latest_number - peer_number < MAX_PEER_LAG_PROPAGATION
+                },
+                _ => false
+            })
+            .map(|(peer_id, _)| peer_id)
+            .cloned().collect::<Vec<PeerId>>()
+    }
+
+    /// propagades latest block to lagging peers
+    fn propagade_blocks(&mut self, io: &mut SyncIo) -> usize {
+        let updated_peers = {
+            let lagging_peers = self.get_lagging_peers(io);
+
+            // sqrt(x)/x scaled to max u32
+            let fraction = (self.peers.len() as f64).powf(-0.5).mul(u32::max_value() as f64).round() as u32;
+            let lucky_peers = match lagging_peers.len() {
+                0 ... MIN_PEERS_PROPAGATION => lagging_peers,
+                _ => lagging_peers.iter().filter(|_| ::rand::random::<u32>() < fraction).cloned().collect::<Vec<PeerId>>()
+            };
+
+            // taking at max of MAX_PEERS_PROPAGATION
+            lucky_peers.iter().take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).cloned().collect::<Vec<PeerId>>()
+        };
+
+        let mut sent = 0;
+        let local_best = io.chain().chain_info().best_block_hash;
+        for peer_id in updated_peers {
+            let rlp = ChainSync::create_latest_block_rlp(io.chain());
+            self.send_request(io, peer_id, PeerAsking::Nothing, NEW_BLOCK_PACKET, rlp);
+            self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").latest = local_best.clone();
+            sent = sent + 1;
+        }
+        sent
+    }
+
+    /// propagades new known hashes to all peers
+    fn propagade_new_hashes(&mut self, io: &mut SyncIo) -> usize {
+        let updated_peers = self.get_lagging_peers(io);
+        let mut sent = 0;
+        let local_best = io.chain().chain_info().best_block_hash;
+        for peer_id in updated_peers {
+            sent = sent + match ChainSync::create_new_hashes_rlp(io.chain(), &self.peers.get(&peer_id).expect("ChainSync: unknown peer").latest, &local_best) {
+                Some(rlp) => {
+                    {
+                        let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer");
+                        peer.latest = local_best.clone();
+                    }
+                    self.send_request(io, peer_id, PeerAsking::Nothing, NEW_BLOCK_HASHES_PACKET, rlp);
+                    1
+                },
+                None => 0
+            }
+        }
+        sent
+    }
+
+    /// Maintain other peers. Send out any new blocks and transactions
+    pub fn maintain_sync(&mut self, io: &mut SyncIo) {
+        self.check_resume(io);
+
+        let peers = self.propagade_new_hashes(io);
+        trace!(target: "sync", "Sent new hashes to peers: {:?}", peers);
+    }
+
+    /// should be called once chain has new block, triggers the latest block propagation
+    pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) {
+        let peers = self.propagade_blocks(io);
+        trace!(target: "sync", "Sent latest block to peers: {:?}", peers);
+    }
 }
 
 #[cfg(test)]

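Before the tests, a note on the `sqrt(x)/x` comment in `propagade_blocks` above: with `n` peers, each lagging peer is kept with probability `n^-0.5`, so the expected number of recipients grows as `sqrt(n)` once the small-network cutoff (`MIN_PEERS_PROPAGATION`) is passed. A standalone sketch of that selection step, with the constants inlined as assumptions rather than taken from the crate, and the random source injected so the example is deterministic:

```rust
// Hypothetical standalone rendering of the "lucky peers" selection;
// constants mirror the diff above but are local assumptions here.
const MIN_PEERS_PROPAGATION: usize = 4;
const MAX_PEERS_PROPAGATION: usize = 128;

fn lucky_peers(lagging: Vec<usize>, total_peers: usize, rand_u32: impl Fn() -> u32) -> Vec<usize> {
    // sqrt(n)/n of u32::MAX: a peer is kept when a random u32 falls below this.
    let fraction = ((total_peers as f64).powf(-0.5) * u32::MAX as f64).round() as u32;
    let picked = if lagging.len() <= MIN_PEERS_PROPAGATION {
        lagging // small networks: tell everyone who lags
    } else {
        lagging.into_iter().filter(|_| rand_u32() < fraction).collect()
    };
    picked.into_iter().take(MAX_PEERS_PROPAGATION).collect()
}

fn main() {
    // With 16 peers, fraction is 1/4 of u32::MAX: each lagging peer has a ~25% chance.
    // The stub random source always lands below that threshold, so all 16 are kept.
    let picked = lucky_peers((0..16).collect(), 16, || u32::MAX / 5);
    assert_eq!(picked.len(), 16);
}
```
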
@@ -1056,6 +1168,48 @@ mod tests {
     use tests::helpers::*;
     use super::*;
     use util::*;
+    use super::{PeerInfo, PeerAsking};
+    use ethcore::header::*;
+    use ethcore::client::*;
+
+    fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes {
+        let mut header = Header::new();
+        header.gas_limit = x!(0);
+        header.difficulty = x!(order * 100);
+        header.timestamp = (order * 10) as u64;
+        header.number = order as u64;
+        header.parent_hash = parent_hash;
+        header.state_root = H256::zero();
+
+        let mut rlp = RlpStream::new_list(3);
+        rlp.append(&header);
+        rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
+        rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
+        rlp.out()
+    }
+
+    fn get_dummy_blocks(order: u32, parent_hash: H256) -> Bytes {
+        let mut rlp = RlpStream::new_list(1);
+        rlp.append_raw(&get_dummy_block(order, parent_hash), 1);
+        let difficulty: U256 = x!(100 * order);
+        rlp.append(&difficulty);
+        rlp.out()
+    }
+
+    fn get_dummy_hashes() -> Bytes {
+        let mut rlp = RlpStream::new_list(5);
+        for _ in 0..5 {
+            let mut hash_d_rlp = RlpStream::new_list(2);
+            let hash: H256 = H256::from(0u64);
+            let diff: U256 = U256::from(1u64);
+            hash_d_rlp.append(&hash);
+            hash_d_rlp.append(&diff);
+
+            rlp.append_raw(&hash_d_rlp.out(), 1);
+        }
+
+        rlp.out()
+    }
+
     #[test]
     fn return_receipts_empty() {
@@ -1124,4 +1278,204 @@ mod tests {
         sync.on_packet(&mut io, 1usize, super::GET_NODE_DATA_PACKET, &node_request);
         assert_eq!(1, io.queue.len());
     }
+
+    fn dummy_sync_with_peer(peer_latest_hash: H256) -> ChainSync {
+        let mut sync = ChainSync::new();
+        sync.peers.insert(0,
+            PeerInfo {
+                protocol_version: 0,
+                genesis: H256::zero(),
+                network_id: U256::zero(),
+                latest: peer_latest_hash,
+                difficulty: U256::zero(),
+                asking: PeerAsking::Nothing,
+                asking_blocks: Vec::<BlockNumber>::new(),
+                ask_time: 0f64,
+            });
+        sync
+    }
+
+    #[test]
+    fn finds_lagging_peers() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(100, false);
+        let mut queue = VecDeque::new();
+        let sync = dummy_sync_with_peer(client.block_hash_delta_minus(10));
+        let io = TestIo::new(&mut client, &mut queue, None);
+
+        let lagging_peers = sync.get_lagging_peers(&io);
+
+        assert_eq!(1, lagging_peers.len())
+    }
+
+    #[test]
+    fn calculates_tree_for_lagging_peer() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(15, false);
+
+        let start = client.block_hash_delta_minus(4);
+        let end = client.block_hash_delta_minus(2);
+
+        // wrong way end -> start, should be None
+        let rlp = ChainSync::create_new_hashes_rlp(&client, &end, &start);
+        assert!(rlp.is_none());
+
+        let rlp = ChainSync::create_new_hashes_rlp(&client, &start, &end).unwrap();
+        // size of three rlp encoded hash-difficulty
+        assert_eq!(107, rlp.len());
+    }
+
+    #[test]
+    fn sends_new_hashes_to_lagging_peer() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(100, false);
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        let peer_count = sync.propagade_new_hashes(&mut io);
+
+        // 1 message should be send
+        assert_eq!(1, io.queue.len());
+        // 1 peer should be updated
+        assert_eq!(1, peer_count);
+        // NEW_BLOCK_HASHES_PACKET
+        assert_eq!(0x01, io.queue[0].packet_id);
+    }
+
+    #[test]
+    fn sends_latest_block_to_lagging_peer() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(100, false);
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        let peer_count = sync.propagade_blocks(&mut io);
+
+        // 1 message should be send
+        assert_eq!(1, io.queue.len());
+        // 1 peer should be updated
+        assert_eq!(1, peer_count);
+        // NEW_BLOCK_PACKET
+        assert_eq!(0x07, io.queue[0].packet_id);
+    }
+
+    #[test]
+    fn handles_peer_new_block_mallformed() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(10, false);
+
+        let block_data = get_dummy_block(11, client.chain_info().best_block_hash);
+
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        let block = UntrustedRlp::new(&block_data);
+
+        let result = sync.on_peer_new_block(&mut io, 0, &block);
+
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn handles_peer_new_block() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(10, false);
+
+        let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash);
+
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        let block = UntrustedRlp::new(&block_data);
+
+        let result = sync.on_peer_new_block(&mut io, 0, &block);
+
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn handles_peer_new_block_empty() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(10, false);
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        let empty_data = vec![];
+        let block = UntrustedRlp::new(&empty_data);
+
+        let result = sync.on_peer_new_block(&mut io, 0, &block);
+
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn handles_peer_new_hashes() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(10, false);
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        let hashes_data = get_dummy_hashes();
+        let hashes_rlp = UntrustedRlp::new(&hashes_data);
+
+        let result = sync.on_peer_new_hashes(&mut io, 0, &hashes_rlp);
+
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn handles_peer_new_hashes_empty() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(10, false);
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        let empty_hashes_data = vec![];
+        let hashes_rlp = UntrustedRlp::new(&empty_hashes_data);
+
+        let result = sync.on_peer_new_hashes(&mut io, 0, &hashes_rlp);
+
+        assert!(result.is_ok());
+    }
+
+    // idea is that what we produce when propagading latest hashes should be accepted in
+    // on_peer_new_hashes in our code as well
+    #[test]
+    fn hashes_rlp_mutually_acceptable() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(100, false);
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        sync.propagade_new_hashes(&mut io);
+
+        let data = &io.queue[0].data.clone();
+        let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(&data));
+        assert!(result.is_ok());
+    }
+
+    // idea is that what we produce when propagading latest block should be accepted in
+    // on_peer_new_block in our code as well
+    #[test]
+    fn block_rlp_mutually_acceptable() {
+        let mut client = TestBlockChainClient::new();
+        client.add_blocks(100, false);
+        let mut queue = VecDeque::new();
+        let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+        let mut io = TestIo::new(&mut client, &mut queue, None);
+
+        sync.propagade_blocks(&mut io);
+
+        let data = &io.queue[0].data.clone();
+        let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(&data));
+        assert!(result.is_ok());
+    }
 }

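A note on the magic number 107 in `calculates_tree_for_lagging_peer` above: the route between `delta_minus(4)` and `delta_minus(2)` yields three hash-difficulty pairs, and the test client reports every total difficulty as `U256::zero()` (see the `TestBlockChainClient::block_total_difficulty` change further down). Under standard RLP sizing rules the byte count works out as plain arithmetic, independent of any parity code:

```rust
// RLP sizing: a 32-byte string takes a 1-byte prefix (0xa0) plus 32 bytes;
// the zero difficulty encodes as the single byte 0x80; each [hash, diff]
// pair is a short list (payload 34 < 56), so it adds a 1-byte list header.
fn main() {
    let hash = 1 + 32;                 // prefixed 32-byte hash
    let difficulty = 1;                // U256::zero() -> 0x80
    let entry = 1 + hash + difficulty; // 35 bytes per pair
    let payload = 3 * entry;           // three pairs = 105 bytes
    let total = 2 + payload;           // payload >= 56 needs a 2-byte list header
    assert_eq!(total, 107);            // matches the assertion in the test
}
```
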
@@ -50,6 +50,7 @@ extern crate ethcore_util as util;
 extern crate ethcore;
 extern crate env_logger;
 extern crate time;
+extern crate rand;
 
 use std::ops::*;
 use std::sync::*;
@@ -125,4 +126,10 @@ impl NetworkProtocolHandler<SyncMessage> for EthSync {
         self.sync.write().unwrap().maintain_peers(&mut NetSyncIo::new(io, self.chain.deref()));
         self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref()));
     }
+
+    fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
+        if let SyncMessage::BlockVerified = *message {
+            self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref()));
+        }
+    }
 }

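Taken together with the client change earlier, this wires up an end-to-end notification path: when the block queue drains, the client emits `SyncMessage::BlockVerified`, the network layer delivers it to `EthSync::message`, and `chain_blocks_verified` pushes the new best block to lagging peers. A toy model of that round trip (all names local to this sketch, not the crate's actual types):

```rust
// Minimal stand-in types to show the BlockVerified round trip.
enum SyncMessage { BlockVerified }

struct Sync { propagated: usize }

impl Sync {
    // counterpart of ChainSync::chain_blocks_verified in the diff
    fn chain_blocks_verified(&mut self) { self.propagated += 1; }
}

struct Handler { sync: Sync }

impl Handler {
    // counterpart of EthSync::message: react only to BlockVerified
    fn message(&mut self, message: &SyncMessage) {
        if let SyncMessage::BlockVerified = *message {
            self.sync.chain_blocks_verified();
        }
    }
}

fn main() {
    let mut handler = Handler { sync: Sync { propagated: 0 } };
    // the client would send this once queue_info().is_empty() turns true
    handler.message(&SyncMessage::BlockVerified);
    assert_eq!(handler.sync.propagated, 1);
}
```
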
@@ -104,4 +104,48 @@ fn restart() {
 fn status_empty() {
     let net = TestNet::new(2);
     assert_eq!(net.peer(0).sync.status().state, SyncState::NotSynced);
+}
+
+#[test]
+fn status_packet() {
+    let mut net = TestNet::new(2);
+    net.peer_mut(0).chain.add_blocks(1000, false);
+    net.peer_mut(1).chain.add_blocks(1, false);
+
+    net.start();
+
+    net.sync_step_peer(0);
+
+    assert_eq!(1, net.peer(0).queue.len());
+    assert_eq!(0x00, net.peer(0).queue[0].packet_id);
+}
+
+#[test]
+fn propagade_hashes() {
+    let mut net = TestNet::new(3);
+    net.peer_mut(1).chain.add_blocks(1000, false);
+    net.peer_mut(2).chain.add_blocks(1000, false);
+    net.sync();
+
+    net.peer_mut(0).chain.add_blocks(10, false);
+    net.sync_step_peer(0);
+
+    // 2 peers to sync
+    assert_eq!(2, net.peer(0).queue.len());
+    // NEW_BLOCK_HASHES_PACKET
+    assert_eq!(0x01, net.peer(0).queue[0].packet_id);
+}
+
+#[test]
+fn propagade_blocks() {
+    let mut net = TestNet::new(2);
+    net.peer_mut(1).chain.add_blocks(10, false);
+    net.sync();
+
+    net.peer_mut(0).chain.add_blocks(10, false);
+    net.trigger_block_verified(0);
+
+    assert!(!net.peer(0).queue.is_empty());
+    // NEW_BLOCK_PACKET
+    assert_eq!(0x07, net.peer(0).queue[0].packet_id);
 }

@@ -69,16 +69,21 @@ impl TestBlockChainClient {
             self.import_block(rlp.as_raw().to_vec()).unwrap();
         }
     }
 
+    pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 {
+        let blocks_read = self.numbers.read().unwrap();
+        let index = blocks_read.len() - delta;
+        blocks_read[&index].clone()
+    }
 }
 
 impl BlockChainClient for TestBlockChainClient {
     fn block_total_difficulty(&self, _h: &H256) -> Option<U256> {
-        unimplemented!();
+        Some(U256::zero())
     }
 
     fn block_header(&self, h: &H256) -> Option<Bytes> {
         self.blocks.read().unwrap().get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec())
     }
 
     fn block_body(&self, h: &H256) -> Option<Bytes> {
@@ -125,11 +130,33 @@ impl BlockChainClient for TestBlockChainClient {
         }
     }
 
-    fn tree_route(&self, _from: &H256, _to: &H256) -> Option<TreeRoute> {
+    // works only if blocks are one after another 1 -> 2 -> 3
+    fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
         Some(TreeRoute {
-            blocks: Vec::new(),
             ancestor: H256::new(),
-            index: 0
+            index: 0,
+            blocks: {
+                let numbers_read = self.numbers.read().unwrap();
+                let mut adding = false;
+
+                let mut blocks = Vec::new();
+                for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) {
+                    if hash == to {
+                        if adding {
+                            blocks.push(hash.clone());
+                        }
+                        adding = false;
+                        break;
+                    }
+                    if hash == from {
+                        adding = true;
+                    }
+                    if adding {
+                        blocks.push(hash.clone());
+                    }
+                }
+                if adding { Vec::new() } else { blocks }
+            }
         })
     }
 
@@ -202,7 +229,6 @@ impl BlockChainClient for TestBlockChainClient {
 
     fn queue_info(&self) -> BlockQueueInfo {
         BlockQueueInfo {
-            full: false,
             verified_queue_size: 0,
             unverified_queue_size: 0,
             verifying_queue_size: 0,
@@ -334,6 +360,11 @@ impl TestNet {
         }
     }
 
+    pub fn sync_step_peer(&mut self, peer_num: usize) {
+        let mut peer = self.peer_mut(peer_num);
+        peer.sync.maintain_sync(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None));
+    }
+
     pub fn restart_peer(&mut self, i: usize) {
         let peer = self.peer_mut(i);
         peer.sync.restart(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None));
@@ -362,4 +393,9 @@ impl TestNet {
     pub fn done(&self) -> bool {
         self.peers.iter().all(|p| p.queue.is_empty())
     }
+
+    pub fn trigger_block_verified(&mut self, peer_id: usize) {
+        let mut peer = self.peer_mut(peer_id);
+        peer.sync.chain_blocks_verified(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None));
+    }
 }

@@ -92,7 +92,6 @@ impl JournalDB {
 
     /// Commit all recent insert operations and historical removals from the old era
     /// to the backing database.
-    #[allow(cyclomatic_complexity)]
     pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
         // journal format:
         // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
@@ -105,6 +104,17 @@ impl JournalDB {
         // for each end_era that we journaled that we are no passing by,
         // we remove all of its removes assuming it is canonical and all
         // of its inserts otherwise.
+        //
+        // We also keep reference counters for each key inserted in the journal to handle
+        // the following cases where key K must not be deleted from the DB when processing removals :
+        // Given H is the journal size in eras, 0 <= C <= H.
+        // Key K is removed in era A(N) and re-inserted in canonical era B(N + C).
+        // Key K is removed in era A(N) and re-inserted in non-canonical era B`(N + C).
+        // Key K is added in non-canonical era A'(N) canonical B(N + C).
+        //
+        // The counter is encreased each time a key is inserted in the journal in the commit. The list of insertions
+        // is saved with the era record. When the era becomes end_era and goes out of journal the counter is decreased
+        // and the key is safe to delete.
+
         // record new commit's details.
         let batch = WriteBatch::new();

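The comment block added above is the heart of this change: a key's journal insertions are counted, and a removal only becomes a physical delete once no era in the journal still references the key. A compact model of that bookkeeping (a bare `HashMap` counter over illustrative `u64` keys, separate from the real `JournalDB`, which additionally tracks canonicality):

```rust
use std::collections::HashMap;

// Count journalled insertions per key; a delete is only safe at zero.
struct Counters(HashMap<u64, i32>);

impl Counters {
    fn insert(&mut self, key: u64) { *self.0.entry(key).or_insert(0) += 1; }

    // Mirrors decrease_counters in the diff: drop the entry when it hits zero.
    fn retire(&mut self, key: u64) {
        let gone = {
            let c = self.0.get_mut(&key).expect("missing counter");
            *c -= 1;
            *c == 0
        };
        if gone { self.0.remove(&key); }
    }

    // A queued removal may touch the database only if no era references the key.
    fn safe_to_delete(&self, key: u64) -> bool { !self.0.contains_key(&key) }
}

fn main() {
    let mut c = Counters(HashMap::new());
    c.insert(7); // era N inserts key K
    c.insert(7); // era N+1 re-inserts K on a fork
    c.retire(7); // era N leaves the journal
    assert!(!c.safe_to_delete(7)); // still referenced by the fork era
    c.retire(7);
    assert!(c.safe_to_delete(7)); // now the removal may actually delete
}
```
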
@@ -125,6 +135,7 @@ impl JournalDB {
 
         let mut r = RlpStream::new_list(3);
         let inserts: Vec<H256> = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect();
+        // Increase counter for each inserted key no matter if the block is canonical or not.
         for i in &inserts {
             *counters.entry(i.clone()).or_insert(0) += 1;
         }
@@ -139,6 +150,8 @@ impl JournalDB {
         if let Some((end_era, canon_id)) = end {
             let mut index = 0usize;
             let mut last;
+            let mut to_remove: Vec<H256> = Vec::new();
+            let mut canon_inserts: Vec<H256> = Vec::new();
             while let Some(rlp_data) = try!(self.backing.get({
                 let mut r = RlpStream::new_list(2);
                 r.append(&end_era);
@@ -146,39 +159,33 @@ impl JournalDB {
                 last = r.drain();
                 &last
             })) {
-                let to_add;
                 let rlp = Rlp::new(&rlp_data);
-                {
-                    to_add = rlp.val_at(1);
-                    for i in &to_add {
-                        let delete_counter = {
-                            if let Some(mut cnt) = counters.get_mut(i) {
-                                *cnt -= 1;
-                                *cnt == 0
-                            }
-                            else { false }
-                        };
-                        if delete_counter {
-                            counters.remove(i);
-                        }
-                    }
-                }
-                let to_remove: Vec<H256> = if canon_id == rlp.val_at(0) {rlp.val_at(2)} else {to_add};
-                for i in &to_remove {
-                    if !counters.contains_key(i) {
-                        batch.delete(&i).expect("Low-level database error. Some issue with your hard disk?");
-                    }
-                }
+                let inserts: Vec<H256> = rlp.val_at(1);
+                JournalDB::decrease_counters(&inserts, &mut counters);
+                // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical
+                if canon_id == rlp.val_at(0) {
+                    to_remove.extend(rlp.at(2).iter().map(|r| r.as_val::<H256>()));
+                    canon_inserts = inserts;
+                }
+                else {
+                    to_remove.extend(inserts);
+                }
                 try!(batch.delete(&last));
-                trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len());
                 index += 1;
             }
+
+            let canon_inserts = canon_inserts.drain(..).collect::<HashSet<_>>();
+            // Purge removed keys if they are not referenced and not re-inserted in the canon commit
+            let mut deletes = 0;
+            for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) {
+                try!(batch.delete(&h));
+                deletes += 1;
+            }
             try!(batch.put(&LAST_ERA_KEY, &encode(&end_era)));
+            trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, deletes);
         }
 
+        // Commit overlay insertions
         let mut ret = 0u32;
         let mut deletes = 0usize;
         for i in self.overlay.drain().into_iter() {
@@ -200,6 +207,21 @@ impl JournalDB {
         Ok(ret)
     }
 
+    // Decrease counters for given keys. Deletes obsolete counters
+    fn decrease_counters(keys: &[H256], counters: &mut HashMap<H256, i32>) {
+        for i in keys.iter() {
+            let delete_counter = {
+                let cnt = counters.get_mut(i).expect("Missing key counter");
+                *cnt -= 1;
+                *cnt == 0
+            };
+            if delete_counter {
+                counters.remove(i);
+            }
+        }
+    }
+
     fn payload(&self, key: &H256) -> Option<Bytes> {
         self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
     }
@@ -387,4 +409,21 @@ mod tests {
         jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
         assert!(jdb.exists(&foo));
     }
+
+    #[test]
+    fn fork_same_key() {
+        // history is 1
+        let mut jdb = JournalDB::new_temp();
+        jdb.commit(0, &b"0".sha3(), None).unwrap();
+
+        let foo = jdb.insert(b"foo");
+        jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
+
+        jdb.insert(b"foo");
+        jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
+        assert!(jdb.exists(&foo));
+
+        jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
+        assert!(jdb.exists(&foo));
+    }
 }

@@ -599,6 +599,9 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 
     fn start_session(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
         let mut connections = self.connections.write().unwrap();
+        if connections.get(token).is_none() {
+            return; // handshake expired
+        }
         connections.replace_with(token, |c| {
             match Arc::try_unwrap(c).ok().unwrap().into_inner().unwrap() {
                 ConnectionEntry::Handshake(h) => {