diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 3df01d898..540b5ea40 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -83,6 +83,7 @@ pub mod header; pub mod service; pub mod spec; pub mod views; +pub mod receipt; mod common; mod basic_types; @@ -98,7 +99,6 @@ mod state; mod account; mod action_params; mod transaction; -mod receipt; mod null_engine; mod builtin; mod extras; diff --git a/ethcore/src/receipt.rs b/ethcore/src/receipt.rs index 43f674aa9..3888a6abc 100644 --- a/ethcore/src/receipt.rs +++ b/ethcore/src/receipt.rs @@ -1,3 +1,5 @@ +//! Receipt + use util::*; use basic_types::LogBloom; use log_entry::LogEntry; diff --git a/install-deps.sh b/install-deps.sh index 4eedab25e..d03098c42 100755 --- a/install-deps.sh +++ b/install-deps.sh @@ -105,7 +105,14 @@ function run_installer() done } - + function prompt_for_input() { + while : + do + read -p "$1 " imp + echo $imp + return + done + } function exe() { echo "\$ $@"; "$@" @@ -347,8 +354,31 @@ function run_installer() fi } + function linux_version() + { + source /etc/lsb-release + + if [[ $DISTRIB_ID == "Ubuntu" ]]; then + if [[ $DISTRIB_RELEASE == "14.04" ]]; then + check "Ubuntu-14.04" + isUbuntu1404=true + else + check "Ubuntu, but not 14.04" + isUbuntu1404=false + fi + else + check "Ubuntu not found" + isUbuntu1404=false + fi + } + function get_linux_dependencies() { + linux_version + + find_multirust + find_rocksdb + find_curl find_git find_make @@ -357,6 +387,46 @@ function run_installer() find_apt } + function find_rocksdb() + { + depCount=$((depCount+1)) + if [[ $(ldconfig -v 2>/dev/null | grep rocksdb | wc -l) == 1 ]]; then + depFound=$((depFound+1)) + check "apt-get" + isRocksDB=true + else + uncheck "librocksdb is missing" + isRocksDB=false + INSTALL_FILES+="${blue}${dim}==> librocksdb:${reset}\n" + fi + } + + function find_multirust() + { + depCount=$((depCount+2)) + MULTIRUST_PATH=`which multirust 2>/dev/null` + if [[ -f $MULTIRUST_PATH ]]; then + depFound=$((depFound+1)) + check "multirust" + isMultirust=true + if [[ $(multirust show-default 2>/dev/null | grep nightly | wc -l) == 4 ]]; then + depFound=$((depFound+1)) + check "rust nightly" + isMultirustNightly=true + else + uncheck "rust is not nightly" + isMultirustNightly=false + INSTALL_FILES+="${blue}${dim}==> multirust -> rust nightly:${reset}\n" + fi + else + uncheck "multirust is missing" + uncheck "rust nightly is missing" + isMultirust=false + isMultirustNightly=false + INSTALL_FILES+="${blue}${dim}==> multirust:${reset}\n" + fi + } + function find_apt() { depCount=$((depCount+1)) @@ -367,7 +437,6 @@ function run_installer() then depFound=$((depFound+1)) check "apt-get" - echo "$($APT_PATH -v)" isApt=true else uncheck "apt-get is missing" @@ -435,7 +504,7 @@ function run_installer() function find_curl() { depCount=$((depCount+1)) - MAKE_PATH=`which curl 2>/dev/null` + CURL_PATH=`which curl 2>/dev/null` if [[ -f $CURL_PATH ]] then @@ -449,25 +518,39 @@ function run_installer() fi } + function ubuntu1404_rocksdb_installer() + { + sudo apt-get update -qq + sudo apt-get install -qq -y software-properties-common + sudo apt-add-repository -y ppa:giskou/librocksdb + sudo apt-get -f -y install + sudo apt-get update -qq + sudo apt-get install -qq -y librocksdb + } + function linux_rocksdb_installer() { - oldpwd=`pwd` - cd /tmp - exe git clone --branch v4.1 --depth=1 https://github.com/facebook/rocksdb.git - cd rocksdb - exe make shared_lib - sudo cp -a librocksdb.so* /usr/lib - sudo ldconfig - cd /tmp - rm -rf /tmp/rocksdb - cd $oldpwd + if [[ 
$isUbuntu1404 == true ]]; then + ubuntu1404_rocksdb_installer + else + oldpwd=`pwd` + cd /tmp + exe git clone --branch v4.1 --depth=1 https://github.com/facebook/rocksdb.git + cd rocksdb + exe make shared_lib + sudo cp -a librocksdb.so* /usr/lib + sudo ldconfig + cd /tmp + rm -rf /tmp/rocksdb + cd $oldpwd + fi } function linux_installer() { if [[ $isGCC == false || $isGit == false || $isMake == false || $isCurl == false ]]; then info "Installing build dependencies..." - sudo apt-get update + sudo apt-get update -qq if [[ $isGit == false ]]; then sudo apt-get install -q -y git fi @@ -483,15 +566,24 @@ function run_installer() echo fi - info "Installing rocksdb..." - linux_rocksdb_installer - echo + if [[ $isRocksDB == false ]]; then + info "Installing rocksdb..." + linux_rocksdb_installer + echo + fi - info "Installing multirust..." - curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sudo sh -s -- --yes - sudo multirust update nightly - sudo multirust default nightly - echo + if [[ $isMultirust == false ]]; then + info "Installing multirust..." + curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sudo sh -s -- --yes + echo + fi + + if [[ $isMultirustNightly == false ]]; then + info "Installing rust nightly..." + sudo multirust update nightly + sudo multirust default nightly + echo + fi } function install() @@ -511,12 +603,111 @@ function run_installer() function verify_installation() { info "Verifying installation" -# find_eth -# if [[ $isEth == false ]] -# then -# abortInstall -# fi + if [[ $OS_TYPE == "linux" ]]; then + find_curl + find_git + find_make + find_gcc + find_rocksdb + find_multirust + + if [[ $isCurl == false || $isGit == false || $isMake == false || $isGCC == false || $isRocksDB == false || $isMultirustNightly == false ]]; then + abortInstall + fi + fi + } + + function build_parity() + { + info "Downloading Parity..." + git clone git@github.com:ethcore/parity + cd parity + git submodule init + git submodule update + + info "Building..." + cargo build --release + + cd .. + + echo + head "Parity is built!" + info "Parity source code is in ${b}$(pwd)/parity${reset}. From that path, you can:" + info "- Run a client & sync the chain with:" + info " ${b}cargo run --release${reset}" + info "- Run a JSONRPC-capable client (for use with netstats) with:" + info " ${b}cargo run --release -- -j --jsonrpc-url 127.0.0.1:8545${reset}" + info "- Run tests with:" + info " ${b}cargo test --release --features ethcore/json-tests -p ethcore${reset}" + info "- Install the client with:" + info " ${b}sudo cp target/release/parity /usr/bin${reset}" + echo + } + + function install_netstats() + { + echo "Installing netstats" + + secret=$(prompt_for_input "Please enter the netstats secret:") + instance_name=$(prompt_for_input "Please enter your instance name:") + contact_details=$(prompt_for_input "Please enter your contact details (optional):") + + # install ethereum & install dependencies + sudo apt-get install -y -qq build-essential git unzip wget nodejs npm ntp cloud-utils + + # add node symlink if it doesn't exist + [[ ! -f /usr/bin/node ]] && sudo ln -s /usr/bin/nodejs /usr/bin/node + + # set up time update cronjob + sudo bash -c "cat > /etc/cron.hourly/ntpdate << EOF + #!/bin/sh + pm2 flush + sudo service ntp stop + sudo ntpdate -s ntp.ubuntu.com + sudo service ntp start + EOF" + + sudo chmod 755 /etc/cron.hourly/ntpdate + + [ ! 
-d "www" ] && git clone https://github.com/cubedro/eth-net-intelligence-api netstats + cd netstats + git pull + git checkout 95d595258239a0fdf56b97dedcfb2be62f6170e6 + + sudo npm install + sudo npm install pm2 -g + + cat > app.json << EOL +[ + { + "name" : "node-app", + "script" : "app.js", + "log_date_format" : "YYYY-MM-DD HH:mm Z", + "merge_logs" : false, + "watch" : false, + "max_restarts" : 10, + "exec_interpreter" : "node", + "exec_mode" : "fork_mode", + "env": + { + "NODE_ENV" : "production", + "RPC_HOST" : "localhost", + "RPC_PORT" : "8545", + "LISTENING_PORT" : "30303", + "INSTANCE_NAME" : "${instance_name}", + "CONTACT_DETAILS" : "${contact_details}", + "WS_SERVER" : "wss://rpc.ethstats.net", + "WS_SECRET" : "${secret}", + "VERBOSITY" : 2 + + } + } +] +EOL + + pm2 start app.json + cd .. } function abortInstall() @@ -530,21 +721,28 @@ function run_installer() function finish() { -# echo -# successHeading "Installation successful!" -# head "Next steps" -# info "Run ${cyan}\`\`${reset} to get started.${reset}" -# echo + echo + successHeading "Installation successful!" + echo exit 0 } + + ####### Run the script + tput clear + echo + echo + echo " ${blue}∷ ${b}${green} WELCOME TO PARITY ${reset} ${blue}∷${reset}" + echo + echo + # Check dependencies head "Checking OS dependencies" detectOS if [[ $INSTALL_FILES != "" ]]; then echo - head "In addition to the parity build dependencies, this script will install:" + head "In addition to the Parity build dependencies, this script will install:" echo "$INSTALL_FILES" echo fi @@ -558,6 +756,20 @@ function run_installer() # Check installation verify_installation + if [[ ! -e parity ]]; then + # Maybe install parity + if wait_for_user "${b}Build dependencies installed B-)!${reset} Would you like to download and build parity?"; then + # Do get parity. + build_parity + fi + fi + + if [[ $OS_TYPE == "linux" && $DISTRIB_ID == "Ubuntu" ]]; then + if wait_for_user "${b}Netstats:${reset} Would you like to install and configure a netstats client?"; then + install_netstats + fi + fi + # Display goodby message finish } diff --git a/install-parity.sh b/install-parity.sh index 848c25c6d..3217cc284 100755 --- a/install-parity.sh +++ b/install-parity.sh @@ -1,9 +1,13 @@ #!/usr/bin/env bash + + +PARITY_DEB_URL=https://github.com/ethcore/parity/releases/download/beta-0.9/parity_0.9.0-0_amd64.deb + function run_installer() { ####### Init vars - + HOMEBREW_PREFIX=/usr/local HOMEBREW_CACHE=/Library/Caches/Homebrew HOMEBREW_REPO=https://github.com/Homebrew/homebrew @@ -22,6 +26,7 @@ function run_installer() isGit=false isRuby=false isBrew=false + isDocker=false canContinue=true depCount=0 depFound=0 @@ -81,11 +86,11 @@ function run_installer() } function check() { - echo "${green}${bold} ✓${reset} $1${reset}" + echo "${green}${bold} ✓${reset} $1${reset}" } function uncheck() { - echo "${red}${bold} ✘${reset} $1${reset}" + echo "${red}${bold} ✘${reset} $1${reset}" } @@ -97,19 +102,23 @@ function run_installer() do read -p "${blue}==>${reset} $1 [Y/n] " imp case $imp in - [yY] ) echo; break ;; + [yY] ) return 0; break ;; '' ) echo; break ;; - [nN] ) abortInstall "${red}==>${reset} Process stopped by user. To resume the install run the one-liner command again." ;; + [nN] ) return 1 ;; * ) echo "Unrecognized option provided. 
Please provide either 'Y' or 'N'"; esac done } - - - function exe() { - echo "\$ $@"; "$@" + function prompt_for_input() { + while : + do + read -p "$1 " imp + echo $imp + return + done } + function detectOS() { if [[ "$OSTYPE" == "linux-gnu" ]] @@ -185,7 +194,7 @@ function run_installer() fi errorMessages+="${red}==>${reset} ${b}Mac OS version too old:${reset} eth requires OS X version ${red}$OSX_REQUIERED_VERSION${reset} at least in order to run.\n" - errorMessages+=" Please update the OS and reload the install process.\n" + errorMessages+=" Please update the OS and reload the install process.\n" } function find_eth() @@ -195,7 +204,6 @@ function run_installer() if [[ -f $ETH_PATH ]] then check "Found parity: $ETH_PATH" - echo "$($ETH_PATH -V)" isEth=true else uncheck "parity is missing" @@ -237,7 +245,7 @@ function run_installer() isRuby=false canContinue=false errorMessages+="${red}==>${reset} ${b}Couldn't find Ruby:${reset} Brew requires Ruby which could not be found.\n" - errorMessages+=" Please install Ruby using these instructions ${u}${blue}https://www.ruby-lang.org/en/documentation/installation/${reset}.\n" + errorMessages+=" Please install Ruby using these instructions ${u}${blue}https://www.ruby-lang.org/en/documentation/installation/${reset}.\n" fi } @@ -255,9 +263,9 @@ function run_installer() isBrew=false INSTALL_FILES+="${blue}${dim}==> Homebrew:${reset}\n" - INSTALL_FILES+=" ${blue}${dim}➜${reset} $HOMEBREW_PREFIX/bin/brew\n" - INSTALL_FILES+=" ${blue}${dim}➜${reset} $HOMEBREW_PREFIX/Library\n" - INSTALL_FILES+=" ${blue}${dim}➜${reset} $HOMEBREW_PREFIX/share/man/man1/brew.1\n" + INSTALL_FILES+=" ${blue}${dim}➜${reset} $HOMEBREW_PREFIX/bin/brew\n" + INSTALL_FILES+=" ${blue}${dim}➜${reset} $HOMEBREW_PREFIX/Library\n" + INSTALL_FILES+=" ${blue}${dim}➜${reset} $HOMEBREW_PREFIX/share/man/man1/brew.1\n" fi depCount=$((depCount+1)) @@ -317,20 +325,20 @@ function run_installer() osx_dependency_installer info "Adding ethcore repository" - exe brew tap ethcore/ethcore git@github.com:ethcore/homebrew-ethcore.git + brew tap ethcore/ethcore https://github.com/ethcore/homebrew-ethcore.git echo info "Updating brew" - exe brew update + brew update echo info "Installing parity" if [[ $isEth == true ]] then - exe brew reinstall parity + brew reinstall parity else - exe brew install parity - exe brew linkapps parity + brew install parity + brew linkapps parity fi echo } @@ -356,6 +364,7 @@ function run_installer() function get_linux_dependencies() { find_apt + find_docker } function find_apt() @@ -372,38 +381,96 @@ function run_installer() isApt=false fi } + + function find_docker() + { + DOCKER_PATH=`which docker 2>/dev/null` + + if [[ -f $DOCKER_PATH ]] + then + check "docker" + echo "$($DOCKER_PATH -v)" + isDocker=true + else + isDocker=false + fi + } function linux_rocksdb_installer() { - oldpwd=`pwd` - cd /tmp - exe git clone --branch v4.1 --depth=1 https://github.com/facebook/rocksdb.git - cd rocksdb - exe make shared_lib - sudo cp -a librocksdb.so* /usr/lib - sudo ldconfig - cd /tmp - rm -rf /tmp/rocksdb - cd $oldpwd + sudo add-apt-repository -y ppa:giskou/librocksdb + sudo apt-get -f -y install + sudo apt-get update + sudo apt-get install -y librocksdb } function linux_installer() { - info "Installing git" - sudo apt-get install -q -y git + info "Installing dependencies" + sudo apt-get update && sudo apt-get install -q -y git curl g++ wget echo info "Installing rocksdb" linux_rocksdb_installer echo - info "Installing multirust" - curl -sf 
https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sudo sh -s -- --yes - sudo multirust update nightly - sudo multirust default nightly - echo - info "Installing parity" - wget --quiet --output-document=- http://ethcore.io/download/parity.deb | dpkg --install - + file=/tmp/parity.deb + + + wget $PARITY_DEB_URL -qO $file + sudo dpkg -i $file + rm $file + } + + function install_netstats() + { + echo "install netstats" + + if [[ $isDocker == false ]] + then + info "installing docker" + curl -sSL https://get.docker.com/ | sh + fi + + dir=$HOME/.netstats + + secret=$(prompt_for_input "Please enter the netstats secret:") + instance_name=$(prompt_for_input "Please enter your instance name:") + contact_details=$(prompt_for_input "Please enter your contact details (optional):") + + + mkdir -p $dir + cat > $dir/app.json << EOL +[ + { + "name" : "node-app", + "script" : "app.js", + "log_date_format" : "YYYY-MM-DD HH:mm Z", + "merge_logs" : false, + "watch" : false, + "max_restarts" : 10, + "exec_interpreter" : "node", + "exec_mode" : "fork_mode", + "env": + { + "NODE_ENV" : "production", + "RPC_HOST" : "localhost", + "RPC_PORT" : "8545", + "LISTENING_PORT" : "30303", + "INSTANCE_NAME" : "${instance_name}", + "CONTACT_DETAILS" : "${contact_details}", + "WS_SERVER" : "wss://rpc.ethstats.net", + "WS_SECRET" : "${secret}", + "VERBOSITY" : 2 + + } + } +] +EOL + + sudo docker rm --force netstats-client 2> /dev/null + sudo docker pull ethcore/netstats-client + sudo docker run -d --net=host --name netstats-client -v $dir/app.json:/home/ethnetintel/eth-net-intelligence-api/app.json ethcore/netstats-client } function install() @@ -442,11 +509,11 @@ function run_installer() function finish() { -# echo -# successHeading "Installation successful!" -# head "Next steps" -# info "Run ${cyan}\`\`${reset} to get started.${reset}" -# echo + echo + successHeading "Installation successful!" + # head "Next steps" + # info "Run ${cyan}\`\`${reset} to get started.${reset}" + echo exit 0 } @@ -460,11 +527,26 @@ function run_installer() echo # Prompt user to continue or abort - wait_for_user "${b}OK,${reset} let's go!" + if wait_for_user "${b}OK,${reset} let's go!" + then + echo "Installing..." + else + abortInstall "${red}==>${reset} Process stopped by user. To resume the install run the one-liner command again." + fi # Install dependencies and eth install + if [[ $OS_TYPE == "linux" ]] + then + echo "Netstats:" + head "Would you like to install and configure a netstats client?" + if wait_for_user "${b}OK,${reset} let's go!" + then + install_netstats + fi + fi + # Check installation verify_installation diff --git a/sync/cov.sh b/sync/cov.sh new file mode 100755 index 000000000..5e95542fa --- /dev/null +++ b/sync/cov.sh @@ -0,0 +1,9 @@ +if ! type kcov > /dev/null; then + echo "Install kcov first (details inside this file). Aborting." + exit 1 +fi + +cargo test --no-run || exit $? +mkdir -p target/coverage +kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,sync/src/tests --include-pattern sync/src --verify target/coverage target/debug/ethsync* +xdg-open target/coverage/index.html diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 665508d92..e143f20b1 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -177,6 +177,7 @@ pub struct ChainSync { have_common_block: bool, } +type RlpResponseResult = Result, PacketDecodeError>; impl ChainSync { /// Create a new instance of syncing strategy. 
@@ -845,7 +846,7 @@ impl ChainSync { } /// Respond to GetBlockHeaders request - fn return_block_headers(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn return_block_headers(io: &SyncIo, r: &UntrustedRlp) -> RlpResponseResult { // Packet layout: // [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ] let max_headers: usize = try!(r.val_at(1)); @@ -892,18 +893,16 @@ impl ChainSync { } let mut rlp = RlpStream::new_list(count as usize); rlp.append_raw(&data, count as usize); - io.respond(BLOCK_HEADERS_PACKET, rlp.out()).unwrap_or_else(|e| - debug!(target: "sync", "Error sending headers: {:?}", e)); trace!(target: "sync", "-> GetBlockHeaders: returned {} entries", count); - Ok(()) + Ok(Some((BLOCK_HEADERS_PACKET, rlp))) } /// Respond to GetBlockBodies request - fn return_block_bodies(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn return_block_bodies(io: &SyncIo, r: &UntrustedRlp) -> RlpResponseResult { let mut count = r.item_count(); if count == 0 { debug!(target: "sync", "Empty GetBlockBodies request, ignoring."); - return Ok(()); + return Ok(None); } trace!(target: "sync", "-> GetBlockBodies: {} entries", count); count = min(count, MAX_BODIES_TO_SEND); @@ -917,18 +916,16 @@ impl ChainSync { } let mut rlp = RlpStream::new_list(added); rlp.append_raw(&data, added); - io.respond(BLOCK_BODIES_PACKET, rlp.out()).unwrap_or_else(|e| - debug!(target: "sync", "Error sending headers: {:?}", e)); trace!(target: "sync", "-> GetBlockBodies: returned {} entries", added); - Ok(()) + Ok(Some((BLOCK_BODIES_PACKET, rlp))) } /// Respond to GetNodeData request - fn return_node_data(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn return_node_data(io: &SyncIo, r: &UntrustedRlp) -> RlpResponseResult { let mut count = r.item_count(); if count == 0 { debug!(target: "sync", "Empty GetNodeData request, ignoring."); - return Ok(()); + return Ok(None); } count = min(count, MAX_NODE_DATA_TO_SEND); let mut added = 0usize; @@ -941,32 +938,43 @@ impl ChainSync { } let mut rlp = RlpStream::new_list(added); rlp.append_raw(&data, added); - io.respond(NODE_DATA_PACKET, rlp.out()).unwrap_or_else(|e| - debug!(target: "sync", "Error sending headers: {:?}", e)); - Ok(()) + Ok(Some((NODE_DATA_PACKET, rlp))) } - /// Respond to GetReceipts request - fn return_receipts(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { - let mut count = r.item_count(); + fn return_receipts(io: &SyncIo, rlp: &UntrustedRlp) -> RlpResponseResult { + let mut count = rlp.item_count(); if count == 0 { debug!(target: "sync", "Empty GetReceipts request, ignoring."); - return Ok(()); + return Ok(None); } count = min(count, MAX_RECEIPTS_TO_SEND); let mut added = 0usize; let mut data = Bytes::new(); for i in 0..count { - if let Some(mut hdr) = io.chain().block_receipts(&try!(r.val_at::(i))) { + if let Some(mut hdr) = io.chain().block_receipts(&try!(rlp.val_at::(i))) { data.append(&mut hdr); added += 1; } } - let mut rlp = RlpStream::new_list(added); - rlp.append_raw(&data, added); - io.respond(RECEIPTS_PACKET, rlp.out()).unwrap_or_else(|e| - debug!(target: "sync", "Error sending headers: {:?}", e)); - Ok(()) + let mut rlp_result = RlpStream::new_list(added); + rlp_result.append_raw(&data, added); + Ok(Some((RECEIPTS_PACKET, rlp_result))) + } + + fn return_rlp(&self, io: &mut SyncIo, rlp: &UntrustedRlp, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError> + where FRlp : Fn(&SyncIo, &UntrustedRlp) -> 
RlpResponseResult, + FError : FnOnce(UtilError) -> String + { + let response = rlp_func(io, rlp); + match response { + Err(e) => Err(e), + Ok(Some((packet_id, rlp_stream))) => { + io.respond(packet_id, rlp_stream.out()).unwrap_or_else( + |e| debug!(target: "sync", "{:?}", error_func(e))); + Ok(()) + } + _ => Ok(()) + } } /// Dispatch incoming requests and responses @@ -975,14 +983,27 @@ impl ChainSync { let result = match packet_id { STATUS_PACKET => self.on_peer_status(io, peer, &rlp), TRANSACTIONS_PACKET => self.on_peer_transactions(io, peer, &rlp), - GET_BLOCK_HEADERS_PACKET => self.return_block_headers(io, &rlp), BLOCK_HEADERS_PACKET => self.on_peer_block_headers(io, peer, &rlp), - GET_BLOCK_BODIES_PACKET => self.return_block_bodies(io, &rlp), BLOCK_BODIES_PACKET => self.on_peer_block_bodies(io, peer, &rlp), NEW_BLOCK_PACKET => self.on_peer_new_block(io, peer, &rlp), NEW_BLOCK_HASHES_PACKET => self.on_peer_new_hashes(io, peer, &rlp), - GET_NODE_DATA_PACKET => self.return_node_data(io, &rlp), - GET_RECEIPTS_PACKET => self.return_receipts(io, &rlp), + + GET_BLOCK_BODIES_PACKET => self.return_rlp(io, &rlp, + ChainSync::return_block_bodies, + |e| format!("Error sending block bodies: {:?}", e)), + + GET_BLOCK_HEADERS_PACKET => self.return_rlp(io, &rlp, + ChainSync::return_block_headers, + |e| format!("Error sending block headers: {:?}", e)), + + GET_RECEIPTS_PACKET => self.return_rlp(io, &rlp, + ChainSync::return_receipts, + |e| format!("Error sending receipts: {:?}", e)), + + GET_NODE_DATA_PACKET => self.return_rlp(io, &rlp, + ChainSync::return_node_data, + |e| format!("Error sending nodes: {:?}", e)), + _ => { debug!(target: "sync", "Unknown packet {}", packet_id); Ok(()) @@ -1013,3 +1034,78 @@ impl ChainSync { } } } + +#[cfg(test)] +mod tests { + use tests::helpers::*; + use super::*; + use util::*; + + #[test] + fn return_receipts_empty() { + let mut client = TestBlockChainClient::new(); + let mut queue = VecDeque::new(); + let io = TestIo::new(&mut client, &mut queue, None); + + let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0])); + + assert!(result.is_ok()); + } + + #[test] + fn return_receipts() { + let mut client = TestBlockChainClient::new(); + let mut queue = VecDeque::new(); + let mut io = TestIo::new(&mut client, &mut queue, None); + + let mut receipt_list = RlpStream::new_list(4); + receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); + receipt_list.append(&H256::from("ff00000000000000000000000000000000000000000000000000000000000000")); + receipt_list.append(&H256::from("fff0000000000000000000000000000000000000000000000000000000000000")); + receipt_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); + + let receipts_request = receipt_list.out(); + // it returns rlp ONLY for hashes started with "f" + let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&receipts_request.clone())); + + assert!(result.is_ok()); + let rlp_result = result.unwrap(); + assert!(rlp_result.is_some()); + + // the length of two rlp-encoded receipts + assert_eq!(597, rlp_result.unwrap().1.out().len()); + + let mut sync = ChainSync::new(); + io.sender = Some(2usize); + sync.on_packet(&mut io, 1usize, super::GET_RECEIPTS_PACKET, &receipts_request); + assert_eq!(1, io.queue.len()); + } + + #[test] + fn return_nodes() { + let mut client = TestBlockChainClient::new(); + let mut queue = VecDeque::new(); + let mut io = TestIo::new(&mut client, &mut queue, None); + + let mut node_list = 
RlpStream::new_list(3); + node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); + node_list.append(&H256::from("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa")); + node_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); + + let node_request = node_list.out(); + // it returns rlp ONLY for hashes started with "f" + let result = ChainSync::return_node_data(&io, &UntrustedRlp::new(&node_request.clone())); + + assert!(result.is_ok()); + let rlp_result = result.unwrap(); + assert!(rlp_result.is_some()); + + // the length of one rlp-encoded hashe + assert_eq!(34, rlp_result.unwrap().1.out().len()); + + let mut sync = ChainSync::new(); + io.sender = Some(2usize); + sync.on_packet(&mut io, 1usize, super::GET_NODE_DATA_PACKET, &node_request); + assert_eq!(1, io.queue.len()); + } +} \ No newline at end of file diff --git a/sync/src/lib.rs b/sync/src/lib.rs index f3b43396c..1523a8a9f 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -109,6 +109,4 @@ impl NetworkProtocolHandler for EthSync { self.sync.write().unwrap().maintain_peers(&mut NetSyncIo::new(io, self.chain.deref())); self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } -} - - +} \ No newline at end of file diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs new file mode 100644 index 000000000..fcd9b6a7b --- /dev/null +++ b/sync/src/tests/chain.rs @@ -0,0 +1,91 @@ +use util::*; +use ethcore::client::{BlockChainClient}; +use io::SyncIo; +use chain::{SyncState}; +use super::helpers::*; + +#[test] +fn two_peers() { + ::env_logger::init().ok(); + let mut net = TestNet::new(3); + net.peer_mut(1).chain.add_blocks(1000, false); + net.peer_mut(2).chain.add_blocks(1000, false); + net.sync(); + assert!(net.peer(0).chain.block_at(1000).is_some()); + assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); +} + +#[test] +fn status_after_sync() { + ::env_logger::init().ok(); + let mut net = TestNet::new(3); + net.peer_mut(1).chain.add_blocks(1000, false); + net.peer_mut(2).chain.add_blocks(1000, false); + net.sync(); + let status = net.peer(0).sync.status(); + assert_eq!(status.state, SyncState::Idle); +} + +#[test] +fn takes_few_steps() { + let mut net = TestNet::new(3); + net.peer_mut(1).chain.add_blocks(100, false); + net.peer_mut(2).chain.add_blocks(100, false); + let total_steps = net.sync(); + assert!(total_steps < 7); +} + +#[test] +fn empty_blocks() { + ::env_logger::init().ok(); + let mut net = TestNet::new(3); + for n in 0..200 { + net.peer_mut(1).chain.add_blocks(5, n % 2 == 0); + net.peer_mut(2).chain.add_blocks(5, n % 2 == 0); + } + net.sync(); + assert!(net.peer(0).chain.block_at(1000).is_some()); + assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); +} + +#[test] +fn forked() { + ::env_logger::init().ok(); + let mut net = TestNet::new(3); + net.peer_mut(0).chain.add_blocks(300, false); + net.peer_mut(1).chain.add_blocks(300, false); + net.peer_mut(2).chain.add_blocks(300, false); + net.peer_mut(0).chain.add_blocks(100, true); //fork + net.peer_mut(1).chain.add_blocks(200, false); + net.peer_mut(2).chain.add_blocks(200, false); + net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 + net.peer_mut(2).chain.add_blocks(10, true); + // peer 1 has the best chain of 601 blocks + let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); + net.sync(); + 
assert_eq!(net.peer(0).chain.numbers.read().unwrap().deref(), &peer1_chain); + assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain); + assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain); +} + +#[test] +fn restart() { + let mut net = TestNet::new(3); + net.peer_mut(1).chain.add_blocks(1000, false); + net.peer_mut(2).chain.add_blocks(1000, false); + + net.sync_steps(8); + + // make sure that sync has actually happened + assert!(net.peer(0).chain.chain_info().best_block_number > 100); + net.restart_peer(0); + + let status = net.peer(0).sync.status(); + assert_eq!(status.state, SyncState::NotSynced); +} + +#[test] +fn status_empty() { + let net = TestNet::new(2); + assert_eq!(net.peer(0).sync.status().state, SyncState::NotSynced); +} \ No newline at end of file diff --git a/sync/src/tests.rs b/sync/src/tests/helpers.rs similarity index 69% rename from sync/src/tests.rs rename to sync/src/tests/helpers.rs index 88f19a8b6..c4a4d80cb 100644 --- a/sync/src/tests.rs +++ b/sync/src/tests/helpers.rs @@ -4,18 +4,19 @@ use ethcore::block_queue::BlockQueueInfo; use ethcore::header::{Header as BlockHeader, BlockNumber}; use ethcore::error::*; use io::SyncIo; -use chain::{ChainSync, SyncState}; +use chain::{ChainSync}; +use ethcore::receipt::Receipt; -struct TestBlockChainClient { - blocks: RwLock>, - numbers: RwLock>, - genesis_hash: H256, - last_hash: RwLock, - difficulty: RwLock, +pub struct TestBlockChainClient { + pub blocks: RwLock>, + pub numbers: RwLock>, + pub genesis_hash: H256, + pub last_hash: RwLock, + pub difficulty: RwLock, } impl TestBlockChainClient { - fn new() -> TestBlockChainClient { + pub fn new() -> TestBlockChainClient { let mut client = TestBlockChainClient { blocks: RwLock::new(HashMap::new()), @@ -116,11 +117,28 @@ impl BlockChainClient for TestBlockChainClient { }) } - fn state_data(&self, _h: &H256) -> Option { + // TODO: returns just hashes instead of node state rlp(?) + fn state_data(&self, hash: &H256) -> Option { + // starts with 'f' ? + if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let mut rlp = RlpStream::new(); + rlp.append(&hash.clone()); + return Some(rlp.out()); + } None } - fn block_receipts(&self, _h: &H256) -> Option { + fn block_receipts(&self, hash: &H256) -> Option { + // starts with 'f' ? 
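+		// the mock returns a receipt only for hashes above 0xf000...0 (i.e. those starting with 'f'), so tests can choose which requests yield data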
+ if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let receipt = Receipt::new( + H256::zero(), + U256::zero(), + vec![]); + let mut rlp = RlpStream::new(); + rlp.append(&receipt); + return Some(rlp.out()); + } None } @@ -189,14 +207,14 @@ impl BlockChainClient for TestBlockChainClient { } } -struct TestIo<'p> { - chain: &'p mut TestBlockChainClient, - queue: &'p mut VecDeque, - sender: Option, +pub struct TestIo<'p> { + pub chain: &'p mut TestBlockChainClient, + pub queue: &'p mut VecDeque, + pub sender: Option, } impl<'p> TestIo<'p> { - fn new(chain: &'p mut TestBlockChainClient, queue: &'p mut VecDeque, sender: Option) -> TestIo<'p> { + pub fn new(chain: &'p mut TestBlockChainClient, queue: &'p mut VecDeque, sender: Option) -> TestIo<'p> { TestIo { chain: chain, queue: queue, @@ -235,21 +253,21 @@ impl<'p> SyncIo for TestIo<'p> { } } -struct TestPacket { - data: Bytes, - packet_id: PacketId, - recipient: PeerId, +pub struct TestPacket { + pub data: Bytes, + pub packet_id: PacketId, + pub recipient: PeerId, } -struct TestPeer { - chain: TestBlockChainClient, - sync: ChainSync, - queue: VecDeque, +pub struct TestPeer { + pub chain: TestBlockChainClient, + pub sync: ChainSync, + pub queue: VecDeque, } -struct TestNet { - peers: Vec, - started: bool, +pub struct TestNet { + pub peers: Vec, + pub started: bool, } impl TestNet { @@ -329,89 +347,3 @@ impl TestNet { self.peers.iter().all(|p| p.queue.is_empty()) } } - -#[test] -fn chain_two_peers() { - ::env_logger::init().ok(); - let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); - net.sync(); - assert!(net.peer(0).chain.block_at(1000).is_some()); - assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); -} - -#[test] -fn chain_status_after_sync() { - ::env_logger::init().ok(); - let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); - net.sync(); - let status = net.peer(0).sync.status(); - assert_eq!(status.state, SyncState::Idle); -} - -#[test] -fn chain_takes_few_steps() { - let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(100, false); - net.peer_mut(2).chain.add_blocks(100, false); - let total_steps = net.sync(); - assert!(total_steps < 7); -} - -#[test] -fn chain_empty_blocks() { - ::env_logger::init().ok(); - let mut net = TestNet::new(3); - for n in 0..200 { - net.peer_mut(1).chain.add_blocks(5, n % 2 == 0); - net.peer_mut(2).chain.add_blocks(5, n % 2 == 0); - } - net.sync(); - assert!(net.peer(0).chain.block_at(1000).is_some()); - assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); -} - -#[test] -fn chain_forked() { - ::env_logger::init().ok(); - let mut net = TestNet::new(3); - net.peer_mut(0).chain.add_blocks(300, false); - net.peer_mut(1).chain.add_blocks(300, false); - net.peer_mut(2).chain.add_blocks(300, false); - net.peer_mut(0).chain.add_blocks(100, true); //fork - net.peer_mut(1).chain.add_blocks(200, false); - net.peer_mut(2).chain.add_blocks(200, false); - net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 - net.peer_mut(2).chain.add_blocks(10, true); - // peer 1 has the best chain of 601 blocks - let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); - net.sync(); - assert_eq!(net.peer(0).chain.numbers.read().unwrap().deref(), &peer1_chain); - 
assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain);
-	assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain);
-}
-
-#[test]
-fn chain_restart() {
-	let mut net = TestNet::new(3);
-	net.peer_mut(1).chain.add_blocks(1000, false);
-	net.peer_mut(2).chain.add_blocks(1000, false);
-
-	net.sync_steps(8);
-
-	// make sure that sync has actually happened
-	assert!(net.peer(0).chain.chain_info().best_block_number > 100);
-	net.restart_peer(0);
-
-	let status = net.peer(0).sync.status();
-	assert_eq!(status.state, SyncState::NotSynced);
-}
-
-#[test]
-fn chain_status_empty() {
-	let net = TestNet::new(2);
-	assert_eq!(net.peer(0).sync.status().state, SyncState::NotSynced);
-}
\ No newline at end of file
diff --git a/sync/src/tests/mod.rs b/sync/src/tests/mod.rs
new file mode 100644
index 000000000..a5fa44b04
--- /dev/null
+++ b/sync/src/tests/mod.rs
@@ -0,0 +1,2 @@
+pub mod helpers;
+mod chain;
\ No newline at end of file
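
For readers of the sync/src/chain.rs changes above: the GetBlockHeaders, GetBlockBodies, GetNodeData and GetReceipts handlers no longer call io.respond themselves; each now builds an optional (packet id, RLP stream) pair (the RlpResponseResult alias), and the shared return_rlp helper performs the send and merely logs any network error. Below is a minimal, self-contained sketch of that shape, not part of the patch; the names ResponseResult, build_response and dispatch, and the simplified Vec<u8> payload, are illustrative placeholders rather than the crate's actual types.

// Simplified model of the responder/dispatcher split used in chain.rs.
// A responder returns Ok(None) when there is nothing to send, Ok(Some(..))
// with a packet id and payload when there is, and Err(..) on a malformed request.
type PacketId = u8;
type ResponseResult = Result<Option<(PacketId, Vec<u8>)>, String>;

const EXAMPLE_PACKET: PacketId = 0x10;

fn build_response(request: &[u8]) -> ResponseResult {
    if request.is_empty() {
        return Ok(None); // empty request: nothing to answer
    }
    Ok(Some((EXAMPLE_PACKET, request.to_vec()))) // echo the payload as a stand-in
}

// A single dispatcher performs the actual send and logs (rather than propagates)
// send errors, mirroring what return_rlp does with io.respond.
fn dispatch<B, S>(request: &[u8], build: B, mut send: S) -> Result<(), String>
where
    B: Fn(&[u8]) -> ResponseResult,
    S: FnMut(PacketId, Vec<u8>) -> Result<(), String>,
{
    match build(request)? {
        Some((id, payload)) => {
            if let Err(e) = send(id, payload) {
                eprintln!("error sending response: {}", e);
            }
            Ok(())
        }
        None => Ok(()),
    }
}

fn main() {
    let mut sent = Vec::new();
    dispatch(b"abc", build_response, |id, payload| {
        sent.push((id, payload));
        Ok(())
    })
    .unwrap();
    assert_eq!(sent.len(), 1); // a non-empty request produced exactly one outgoing packet
}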