Merge branch 'master' into signed_transaction
commit 1d209d909e
@@ -83,6 +83,7 @@ pub mod header;
 pub mod service;
 pub mod spec;
 pub mod views;
+pub mod receipt;
 
 mod common;
 mod basic_types;
@@ -98,7 +99,6 @@ mod state;
 mod account;
 mod action_params;
 mod transaction;
-mod receipt;
 mod null_engine;
 mod builtin;
 mod extras;
@@ -1,3 +1,5 @@
+//! Receipt
+
 use util::*;
 use basic_types::LogBloom;
 use log_entry::LogEntry;
install-deps.sh (242 lines changed)

@@ -105,7 +105,14 @@ function run_installer()
 	done
 }
 
+	function prompt_for_input() {
+		while :
+		do
+			read -p "$1 " imp
+			echo $imp
+			return
+		done
+	}
 
 	function exe() {
 		echo "\$ $@"; "$@"
@@ -347,8 +354,31 @@ function run_installer()
 	fi
 	}
 
+	function linux_version()
+	{
+		source /etc/lsb-release
+
+		if [[ $DISTRIB_ID == "Ubuntu" ]]; then
+			if [[ $DISTRIB_RELEASE == "14.04" ]]; then
+				check "Ubuntu-14.04"
+				isUbuntu1404=true
+			else
+				check "Ubuntu, but not 14.04"
+				isUbuntu1404=false
+			fi
+		else
+			check "Ubuntu not found"
+			isUbuntu1404=false
+		fi
+	}
+
 	function get_linux_dependencies()
 	{
+		linux_version
+
+		find_multirust
+		find_rocksdb
+
 		find_curl
 		find_git
 		find_make
@@ -357,6 +387,46 @@ function run_installer()
 		find_apt
 	}
 
+	function find_rocksdb()
+	{
+		depCount=$((depCount+1))
+		if [[ $(ldconfig -v 2>/dev/null | grep rocksdb | wc -l) == 1 ]]; then
+			depFound=$((depFound+1))
+			check "apt-get"
+			isRocksDB=true
+		else
+			uncheck "librocksdb is missing"
+			isRocksDB=false
+			INSTALL_FILES+="${blue}${dim}==> librocksdb:${reset}\n"
+		fi
+	}
+
+	function find_multirust()
+	{
+		depCount=$((depCount+2))
+		MULTIRUST_PATH=`which multirust 2>/dev/null`
+		if [[ -f $MULTIRUST_PATH ]]; then
+			depFound=$((depFound+1))
+			check "multirust"
+			isMultirust=true
+			if [[ $(multirust show-default 2>/dev/null | grep nightly | wc -l) == 4 ]]; then
+				depFound=$((depFound+1))
+				check "rust nightly"
+				isMultirustNightly=true
+			else
+				uncheck "rust is not nightly"
+				isMultirustNightly=false
+				INSTALL_FILES+="${blue}${dim}==> multirust -> rust nightly:${reset}\n"
+			fi
+		else
+			uncheck "multirust is missing"
+			uncheck "rust nightly is missing"
+			isMultirust=false
+			isMultirustNightly=false
+			INSTALL_FILES+="${blue}${dim}==> multirust:${reset}\n"
+		fi
+	}
+
 	function find_apt()
 	{
 		depCount=$((depCount+1))
@@ -367,7 +437,6 @@ function run_installer()
 		then
 			depFound=$((depFound+1))
 			check "apt-get"
-			echo "$($APT_PATH -v)"
 			isApt=true
 		else
 			uncheck "apt-get is missing"
@@ -435,7 +504,7 @@ function run_installer()
 	function find_curl()
 	{
 		depCount=$((depCount+1))
-		MAKE_PATH=`which curl 2>/dev/null`
+		CURL_PATH=`which curl 2>/dev/null`
 
 		if [[ -f $CURL_PATH ]]
 		then
@@ -449,8 +518,21 @@ function run_installer()
 		fi
 	}
 
+	function ubuntu1404_rocksdb_installer()
+	{
+		sudo apt-get update -qq
+		sudo apt-get install -qq -y software-properties-common
+		sudo apt-add-repository -y ppa:giskou/librocksdb
+		sudo apt-get -f -y install
+		sudo apt-get update -qq
+		sudo apt-get install -qq -y librocksdb
+	}
+
 	function linux_rocksdb_installer()
 	{
+		if [[ $isUbuntu1404 == true ]]; then
+			ubuntu1404_rocksdb_installer
+		else
 		oldpwd=`pwd`
 		cd /tmp
 		exe git clone --branch v4.1 --depth=1 https://github.com/facebook/rocksdb.git
@@ -461,13 +543,14 @@ function run_installer()
 		cd /tmp
 		rm -rf /tmp/rocksdb
 		cd $oldpwd
+		fi
 	}
 
 	function linux_installer()
 	{
 		if [[ $isGCC == false || $isGit == false || $isMake == false || $isCurl == false ]]; then
 			info "Installing build dependencies..."
-			sudo apt-get update
+			sudo apt-get update -qq
 			if [[ $isGit == false ]]; then
 				sudo apt-get install -q -y git
 			fi
@@ -483,15 +566,24 @@ function run_installer()
 			echo
 		fi
 
+		if [[ $isRocksDB == false ]]; then
 			info "Installing rocksdb..."
 			linux_rocksdb_installer
 			echo
+		fi
+
+		if [[ $isMultirust == false ]]; then
 			info "Installing multirust..."
 			curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sudo sh -s -- --yes
+			echo
+		fi
+
+		if [[ $isMultirustNightly == false ]]; then
+			info "Installing rust nightly..."
 			sudo multirust update nightly
 			sudo multirust default nightly
 			echo
+		fi
 	}
 
 	function install()
@@ -511,12 +603,111 @@ function run_installer()
 	function verify_installation()
 	{
 		info "Verifying installation"
-		# find_eth
 
-		# if [[ $isEth == false ]]
-		# then
-		# 	abortInstall
-		# fi
+		if [[ $OS_TYPE == "linux" ]]; then
+			find_curl
+			find_git
+			find_make
+			find_gcc
+			find_rocksdb
+			find_multirust
+
+			if [[ $isCurl == false || $isGit == false || $isMake == false || $isGCC == false || $isRocksDB == false || $isMultirustNightly == false ]]; then
+				abortInstall
+			fi
+		fi
+	}
+
+	function build_parity()
+	{
+		info "Downloading Parity..."
+		git clone git@github.com:ethcore/parity
+		cd parity
+		git submodule init
+		git submodule update
+
+		info "Building..."
+		cargo build --release
+
+		cd ..
+
+		echo
+		head "Parity is built!"
+		info "Parity source code is in ${b}$(pwd)/parity${reset}. From that path, you can:"
+		info "- Run a client & sync the chain with:"
+		info "    ${b}cargo run --release${reset}"
+		info "- Run a JSONRPC-capable client (for use with netstats) with:"
+		info "    ${b}cargo run --release -- -j --jsonrpc-url 127.0.0.1:8545${reset}"
+		info "- Run tests with:"
+		info "    ${b}cargo test --release --features ethcore/json-tests -p ethcore${reset}"
+		info "- Install the client with:"
+		info "    ${b}sudo cp target/release/parity /usr/bin${reset}"
+		echo
+	}
+
+	function install_netstats()
+	{
+		echo "Installing netstats"
+
+		secret=$(prompt_for_input "Please enter the netstats secret:")
+		instance_name=$(prompt_for_input "Please enter your instance name:")
+		contact_details=$(prompt_for_input "Please enter your contact details (optional):")
+
+		# install ethereum & install dependencies
+		sudo apt-get install -y -qq build-essential git unzip wget nodejs npm ntp cloud-utils
+
+		# add node symlink if it doesn't exist
+		[[ ! -f /usr/bin/node ]] && sudo ln -s /usr/bin/nodejs /usr/bin/node
+
+		# set up time update cronjob
+		sudo bash -c "cat > /etc/cron.hourly/ntpdate << EOF
+#!/bin/sh
+pm2 flush
+sudo service ntp stop
+sudo ntpdate -s ntp.ubuntu.com
+sudo service ntp start
+EOF"
+
+		sudo chmod 755 /etc/cron.hourly/ntpdate
+
+		[ ! -d "www" ] && git clone https://github.com/cubedro/eth-net-intelligence-api netstats
+		cd netstats
+		git pull
+		git checkout 95d595258239a0fdf56b97dedcfb2be62f6170e6
+
+		sudo npm install
+		sudo npm install pm2 -g
+
+		cat > app.json << EOL
+[
+	{
+		"name"              : "node-app",
+		"script"            : "app.js",
+		"log_date_format"   : "YYYY-MM-DD HH:mm Z",
+		"merge_logs"        : false,
+		"watch"             : false,
+		"max_restarts"      : 10,
+		"exec_interpreter"  : "node",
+		"exec_mode"         : "fork_mode",
+		"env":
+		{
+			"NODE_ENV"        : "production",
+			"RPC_HOST"        : "localhost",
+			"RPC_PORT"        : "8545",
+			"LISTENING_PORT"  : "30303",
+			"INSTANCE_NAME"   : "${instance_name}",
+			"CONTACT_DETAILS" : "${contact_details}",
+			"WS_SERVER"       : "wss://rpc.ethstats.net",
+			"WS_SECRET"       : "${secret}",
+			"VERBOSITY"       : 2
+		}
+	}
]
EOL
+
+		pm2 start app.json
+		cd ..
 	}
 
 	function abortInstall()
@@ -530,21 +721,28 @@ function run_installer()
 
 	function finish()
 	{
-		# echo
-		# successHeading "Installation successful!"
-		# head "Next steps"
-		# info "Run ${cyan}\`\`${reset} to get started.${reset}"
-		# echo
+		echo
+		successHeading "Installation successful!"
+		echo
 		exit 0
 	}
 
+
+	####### Run the script
+	tput clear
+	echo
+	echo
+	echo " ${blue}∷ ${b}${green} WELCOME TO PARITY ${reset} ${blue}∷${reset}"
+	echo
+	echo
+
 	# Check dependencies
 	head "Checking OS dependencies"
 	detectOS
 
 	if [[ $INSTALL_FILES != "" ]]; then
 		echo
-		head "In addition to the parity build dependencies, this script will install:"
+		head "In addition to the Parity build dependencies, this script will install:"
 		echo "$INSTALL_FILES"
 		echo
 	fi
@@ -558,6 +756,20 @@ function run_installer()
 	# Check installation
 	verify_installation
 
+	if [[ ! -e parity ]]; then
+		# Maybe install parity
+		if wait_for_user "${b}Build dependencies installed B-)!${reset} Would you like to download and build parity?"; then
+			# Do get parity.
+			build_parity
+		fi
+	fi
+
+	if [[ $OS_TYPE == "linux" && $DISTRIB_ID == "Ubuntu" ]]; then
+		if wait_for_user "${b}Netstats:${reset} Would you like to install and configure a netstats client?"; then
+			install_netstats
+		fi
+	fi
+
 	# Display goodby message
 	finish
 }
@@ -1,5 +1,9 @@
 #!/usr/bin/env bash
 
+
+
+PARITY_DEB_URL=https://github.com/ethcore/parity/releases/download/beta-0.9/parity_0.9.0-0_amd64.deb
+
 function run_installer()
 {
 	####### Init vars
@@ -22,6 +26,7 @@ function run_installer()
 	isGit=false
 	isRuby=false
 	isBrew=false
+	isDocker=false
 	canContinue=true
 	depCount=0
 	depFound=0
@@ -97,20 +102,24 @@ function run_installer()
 		do
 			read -p "${blue}==>${reset} $1 [Y/n] " imp
 			case $imp in
-				[yY] ) echo; break ;;
+				[yY] ) return 0; break ;;
 				'' ) echo; break ;;
-				[nN] ) abortInstall "${red}==>${reset} Process stopped by user. To resume the install run the one-liner command again." ;;
+				[nN] ) return 1 ;;
 				* ) echo "Unrecognized option provided. Please provide either 'Y' or 'N'";
 			esac
 		done
 	}
 
-	function exe() {
-		echo "\$ $@"; "$@"
+	function prompt_for_input() {
+		while :
+		do
+			read -p "$1 " imp
+			echo $imp
+			return
+		done
 	}
 
 
 	function detectOS() {
 		if [[ "$OSTYPE" == "linux-gnu" ]]
 		then
@@ -195,7 +204,6 @@ function run_installer()
 		if [[ -f $ETH_PATH ]]
 		then
 			check "Found parity: $ETH_PATH"
-			echo "$($ETH_PATH -V)"
 			isEth=true
 		else
 			uncheck "parity is missing"
@@ -317,20 +325,20 @@ function run_installer()
 		osx_dependency_installer
 
 		info "Adding ethcore repository"
-		exe brew tap ethcore/ethcore git@github.com:ethcore/homebrew-ethcore.git
+		brew tap ethcore/ethcore https://github.com/ethcore/homebrew-ethcore.git
 		echo
 
 		info "Updating brew"
-		exe brew update
+		brew update
 		echo
 
 		info "Installing parity"
 		if [[ $isEth == true ]]
 		then
-			exe brew reinstall parity
+			brew reinstall parity
 		else
-			exe brew install parity
-			exe brew linkapps parity
+			brew install parity
+			brew linkapps parity
 		fi
 		echo
 	}
@@ -356,6 +364,7 @@ function run_installer()
 	function get_linux_dependencies()
 	{
 		find_apt
+		find_docker
 	}
 
 	function find_apt()
@@ -372,38 +381,96 @@ function run_installer()
 			isApt=false
 		fi
 	}
 
+	function find_docker()
+	{
+		DOCKER_PATH=`which docker 2>/dev/null`
+
+		if [[ -f $DOCKER_PATH ]]
+		then
+			check "docker"
+			echo "$($DOCKER_PATH -v)"
+			isDocker=true
+		else
+			isDocker=false
+		fi
+	}
+
 	function linux_rocksdb_installer()
 	{
-		oldpwd=`pwd`
-		cd /tmp
-		exe git clone --branch v4.1 --depth=1 https://github.com/facebook/rocksdb.git
-		cd rocksdb
-		exe make shared_lib
-		sudo cp -a librocksdb.so* /usr/lib
-		sudo ldconfig
-		cd /tmp
-		rm -rf /tmp/rocksdb
-		cd $oldpwd
+		sudo add-apt-repository -y ppa:giskou/librocksdb
+		sudo apt-get -f -y install
+		sudo apt-get update
+		sudo apt-get install -y librocksdb
 	}
 
	function linux_installer()
 	{
-		info "Installing git"
-		sudo apt-get install -q -y git
+		info "Installing dependencies"
+		sudo apt-get update && sudo apt-get install -q -y git curl g++ wget
 		echo
 
 		info "Installing rocksdb"
 		linux_rocksdb_installer
 		echo
 
-		info "Installing multirust"
-		curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sudo sh -s -- --yes
-		sudo multirust update nightly
-		sudo multirust default nightly
-		echo
-
 		info "Installing parity"
-		wget --quiet --output-document=- http://ethcore.io/download/parity.deb | dpkg --install -
+		file=/tmp/parity.deb
+
+		wget $PARITY_DEB_URL -qO $file
+		sudo dpkg -i $file
+		rm $file
+	}
+
+	function install_netstats()
+	{
+		echo "install netstats"
+
+		if [[ $isDocker == false ]]
+		then
+			info "installing docker"
+			curl -sSL https://get.docker.com/ | sh
+		fi
+
+		dir=$HOME/.netstats
+
+		secret=$(prompt_for_input "Please enter the netstats secret:")
+		instance_name=$(prompt_for_input "Please enter your instance name:")
+		contact_details=$(prompt_for_input "Please enter your contact details (optional):")
+
+		mkdir -p $dir
+		cat > $dir/app.json << EOL
+[
+	{
+		"name"              : "node-app",
+		"script"            : "app.js",
+		"log_date_format"   : "YYYY-MM-DD HH:mm Z",
+		"merge_logs"        : false,
+		"watch"             : false,
+		"max_restarts"      : 10,
+		"exec_interpreter"  : "node",
+		"exec_mode"         : "fork_mode",
+		"env":
+		{
+			"NODE_ENV"        : "production",
+			"RPC_HOST"        : "localhost",
+			"RPC_PORT"        : "8545",
+			"LISTENING_PORT"  : "30303",
+			"INSTANCE_NAME"   : "${instance_name}",
+			"CONTACT_DETAILS" : "${contact_details}",
+			"WS_SERVER"       : "wss://rpc.ethstats.net",
+			"WS_SECRET"       : "${secret}",
+			"VERBOSITY"       : 2
+		}
	}
]
EOL
+
+		sudo docker rm --force netstats-client 2> /dev/null
+		sudo docker pull ethcore/netstats-client
+		sudo docker run -d --net=host --name netstats-client -v $dir/app.json:/home/ethnetintel/eth-net-intelligence-api/app.json ethcore/netstats-client
 	}
 
 	function install()
@@ -442,11 +509,11 @@ function run_installer()
 
 	function finish()
 	{
-		# echo
-		# successHeading "Installation successful!"
+		echo
+		successHeading "Installation successful!"
 		# head "Next steps"
 		# info "Run ${cyan}\`\`${reset} to get started.${reset}"
-		# echo
+		echo
 		exit 0
 	}
 
@@ -460,11 +527,26 @@ function run_installer()
 	echo
 
 	# Prompt user to continue or abort
-	wait_for_user "${b}OK,${reset} let's go!"
+	if wait_for_user "${b}OK,${reset} let's go!"
+	then
+		echo "Installing..."
+	else
+		abortInstall "${red}==>${reset} Process stopped by user. To resume the install run the one-liner command again."
+	fi
 
 	# Install dependencies and eth
 	install
 
+	if [[ $OS_TYPE == "linux" ]]
+	then
+		echo "Netstats:"
+		head "Would you like to install and configure a netstats client?"
+		if wait_for_user "${b}OK,${reset} let's go!"
+		then
+			install_netstats
+		fi
+	fi
+
 	# Check installation
 	verify_installation
 
sync/cov.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
+if ! type kcov > /dev/null; then
+	echo "Install kcov first (details inside this file). Aborting."
+	exit 1
+fi
+
+cargo test --no-run || exit $?
+mkdir -p target/coverage
+kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,sync/src/tests --include-pattern sync/src --verify target/coverage target/debug/ethsync*
+xdg-open target/coverage/index.html
@@ -177,6 +177,7 @@ pub struct ChainSync {
 	have_common_block: bool,
 }
 
+type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>;
 
 impl ChainSync {
 	/// Create a new instance of syncing strategy.
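The added RlpResponseResult alias is what lets each GET_* responder build a reply packet and hand it back to the caller instead of writing to the peer itself; Ok(None) then stands for "nothing to send". A minimal stand-alone sketch of that shape, using plain standard-library stand-ins rather than the parity types (PacketId, RlpStream, PacketDecodeError) named in the diff:

// Sketch only: PacketId, Payload and DecodeError below are illustrative stand-ins.
type PacketId = u8;
type Payload = Vec<u8>;

#[derive(Debug)]
struct DecodeError;

// "No response" is a legitimate outcome (e.g. an empty request), so the
// packet half of the result is optional.
type ResponseResult = Result<Option<(PacketId, Payload)>, DecodeError>;

// A responder in this style only *builds* the reply; the caller decides
// how (and whether) to send it.
fn respond_pong(request: &[u8]) -> ResponseResult {
    if request.is_empty() {
        return Ok(None); // nothing to answer
    }
    Ok(Some((0x03, b"pong".to_vec())))
}

fn main() {
    assert_eq!(respond_pong(b"ping").unwrap(), Some((0x03, b"pong".to_vec())));
    assert_eq!(respond_pong(b"").unwrap(), None);
}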
@@ -845,7 +846,7 @@ impl ChainSync {
 	}
 
 	/// Respond to GetBlockHeaders request
-	fn return_block_headers(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
+	fn return_block_headers(io: &SyncIo, r: &UntrustedRlp) -> RlpResponseResult {
 		// Packet layout:
 		// [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ]
 		let max_headers: usize = try!(r.val_at(1));
@@ -892,18 +893,16 @@ impl ChainSync {
 		}
 		let mut rlp = RlpStream::new_list(count as usize);
 		rlp.append_raw(&data, count as usize);
-		io.respond(BLOCK_HEADERS_PACKET, rlp.out()).unwrap_or_else(|e|
-			debug!(target: "sync", "Error sending headers: {:?}", e));
 		trace!(target: "sync", "-> GetBlockHeaders: returned {} entries", count);
-		Ok(())
+		Ok(Some((BLOCK_HEADERS_PACKET, rlp)))
 	}
 
 	/// Respond to GetBlockBodies request
-	fn return_block_bodies(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
+	fn return_block_bodies(io: &SyncIo, r: &UntrustedRlp) -> RlpResponseResult {
 		let mut count = r.item_count();
 		if count == 0 {
 			debug!(target: "sync", "Empty GetBlockBodies request, ignoring.");
-			return Ok(());
+			return Ok(None);
 		}
 		trace!(target: "sync", "-> GetBlockBodies: {} entries", count);
 		count = min(count, MAX_BODIES_TO_SEND);
@@ -917,18 +916,16 @@ impl ChainSync {
 		}
 		let mut rlp = RlpStream::new_list(added);
 		rlp.append_raw(&data, added);
-		io.respond(BLOCK_BODIES_PACKET, rlp.out()).unwrap_or_else(|e|
-			debug!(target: "sync", "Error sending headers: {:?}", e));
 		trace!(target: "sync", "-> GetBlockBodies: returned {} entries", added);
-		Ok(())
+		Ok(Some((BLOCK_BODIES_PACKET, rlp)))
 	}
 
 	/// Respond to GetNodeData request
-	fn return_node_data(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
+	fn return_node_data(io: &SyncIo, r: &UntrustedRlp) -> RlpResponseResult {
 		let mut count = r.item_count();
 		if count == 0 {
 			debug!(target: "sync", "Empty GetNodeData request, ignoring.");
-			return Ok(());
+			return Ok(None);
 		}
 		count = min(count, MAX_NODE_DATA_TO_SEND);
 		let mut added = 0usize;
@@ -941,33 +938,44 @@ impl ChainSync {
 		}
 		let mut rlp = RlpStream::new_list(added);
 		rlp.append_raw(&data, added);
-		io.respond(NODE_DATA_PACKET, rlp.out()).unwrap_or_else(|e|
-			debug!(target: "sync", "Error sending headers: {:?}", e));
-		Ok(())
+		Ok(Some((NODE_DATA_PACKET, rlp)))
 	}
 
-	/// Respond to GetReceipts request
-	fn return_receipts(&self, io: &mut SyncIo, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
-		let mut count = r.item_count();
+	fn return_receipts(io: &SyncIo, rlp: &UntrustedRlp) -> RlpResponseResult {
+		let mut count = rlp.item_count();
 		if count == 0 {
 			debug!(target: "sync", "Empty GetReceipts request, ignoring.");
-			return Ok(());
+			return Ok(None);
 		}
 		count = min(count, MAX_RECEIPTS_TO_SEND);
 		let mut added = 0usize;
 		let mut data = Bytes::new();
 		for i in 0..count {
-			if let Some(mut hdr) = io.chain().block_receipts(&try!(r.val_at::<H256>(i))) {
+			if let Some(mut hdr) = io.chain().block_receipts(&try!(rlp.val_at::<H256>(i))) {
 				data.append(&mut hdr);
 				added += 1;
 			}
 		}
-		let mut rlp = RlpStream::new_list(added);
-		rlp.append_raw(&data, added);
-		io.respond(RECEIPTS_PACKET, rlp.out()).unwrap_or_else(|e|
-			debug!(target: "sync", "Error sending headers: {:?}", e));
-		Ok(())
+		let mut rlp_result = RlpStream::new_list(added);
+		rlp_result.append_raw(&data, added);
+		Ok(Some((RECEIPTS_PACKET, rlp_result)))
+	}
+
+	fn return_rlp<FRlp, FError>(&self, io: &mut SyncIo, rlp: &UntrustedRlp, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError>
+		where FRlp : Fn(&SyncIo, &UntrustedRlp) -> RlpResponseResult,
+			FError : FnOnce(UtilError) -> String
+	{
+		let response = rlp_func(io, rlp);
+		match response {
+			Err(e) => Err(e),
+			Ok(Some((packet_id, rlp_stream))) => {
+				io.respond(packet_id, rlp_stream.out()).unwrap_or_else(
+					|e| debug!(target: "sync", "{:?}", error_func(e)));
+				Ok(())
+			}
+			_ => Ok(())
+		}
 	}
 
 	/// Dispatch incoming requests and responses
 	pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
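The new return_rlp helper centralises the "build a response, send it, log a send failure" sequence that each responder previously repeated: it takes the request, a builder function and an error formatter, and only the helper talks to the network. A rough, self-contained analogue of that pattern (Io, reply_with, echo and the rest are illustrative stand-ins, not the SyncIo/ChainSync API used in the diff):

// Sketch only: the point is the shape of the helper, not the actual types.
type PacketId = u8;
type Payload = Vec<u8>;

#[derive(Debug)]
struct DecodeError;

type ResponseResult = Result<Option<(PacketId, Payload)>, DecodeError>;

struct Io {
    sent: Vec<(PacketId, Payload)>,
}

impl Io {
    // Sending can fail in a real network stack; this stub always succeeds.
    fn respond(&mut self, id: PacketId, data: Payload) -> Result<(), String> {
        self.sent.push((id, data));
        Ok(())
    }
}

// Build the reply with `build`; if there is one, send it and report a send
// failure using the message produced by `describe_err`. Decode errors are
// handed straight back to the caller, mirroring return_rlp.
fn reply_with<FBuild, FErr>(io: &mut Io, request: &[u8], build: FBuild, describe_err: FErr) -> Result<(), DecodeError>
    where FBuild: Fn(&[u8]) -> ResponseResult,
          FErr: FnOnce(String) -> String
{
    match build(request)? {
        Some((id, payload)) => {
            if let Err(e) = io.respond(id, payload) {
                println!("{}", describe_err(e));
            }
            Ok(())
        }
        None => Ok(()),
    }
}

// A trivial responder: echo the request back on packet id 0x10.
fn echo(request: &[u8]) -> ResponseResult {
    Ok(Some((0x10, request.to_vec())))
}

fn main() {
    let mut io = Io { sent: Vec::new() };
    reply_with(&mut io, b"hello", echo, |e| format!("Error sending echo: {:?}", e)).unwrap();
    assert_eq!(io.sent.len(), 1);
}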
@@ -975,14 +983,27 @@ impl ChainSync {
 		let result = match packet_id {
 			STATUS_PACKET => self.on_peer_status(io, peer, &rlp),
 			TRANSACTIONS_PACKET => self.on_peer_transactions(io, peer, &rlp),
-			GET_BLOCK_HEADERS_PACKET => self.return_block_headers(io, &rlp),
 			BLOCK_HEADERS_PACKET => self.on_peer_block_headers(io, peer, &rlp),
-			GET_BLOCK_BODIES_PACKET => self.return_block_bodies(io, &rlp),
 			BLOCK_BODIES_PACKET => self.on_peer_block_bodies(io, peer, &rlp),
 			NEW_BLOCK_PACKET => self.on_peer_new_block(io, peer, &rlp),
 			NEW_BLOCK_HASHES_PACKET => self.on_peer_new_hashes(io, peer, &rlp),
-			GET_NODE_DATA_PACKET => self.return_node_data(io, &rlp),
-			GET_RECEIPTS_PACKET => self.return_receipts(io, &rlp),
+
+			GET_BLOCK_BODIES_PACKET => self.return_rlp(io, &rlp,
+				ChainSync::return_block_bodies,
+				|e| format!("Error sending block bodies: {:?}", e)),
+
+			GET_BLOCK_HEADERS_PACKET => self.return_rlp(io, &rlp,
+				ChainSync::return_block_headers,
+				|e| format!("Error sending block headers: {:?}", e)),
+
+			GET_RECEIPTS_PACKET => self.return_rlp(io, &rlp,
+				ChainSync::return_receipts,
+				|e| format!("Error sending receipts: {:?}", e)),
+
+			GET_NODE_DATA_PACKET => self.return_rlp(io, &rlp,
+				ChainSync::return_node_data,
+				|e| format!("Error sending nodes: {:?}", e)),
+
 			_ => {
 				debug!(target: "sync", "Unknown packet {}", packet_id);
 				Ok(())
@@ -1013,3 +1034,78 @@ impl ChainSync {
 		}
 	}
 }
+
+#[cfg(test)]
+mod tests {
+	use tests::helpers::*;
+	use super::*;
+	use util::*;
+
+	#[test]
+	fn return_receipts_empty() {
+		let mut client = TestBlockChainClient::new();
+		let mut queue = VecDeque::new();
+		let io = TestIo::new(&mut client, &mut queue, None);
+
+		let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]));
+
+		assert!(result.is_ok());
+	}
+
+	#[test]
+	fn return_receipts() {
+		let mut client = TestBlockChainClient::new();
+		let mut queue = VecDeque::new();
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		let mut receipt_list = RlpStream::new_list(4);
+		receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555"));
+		receipt_list.append(&H256::from("ff00000000000000000000000000000000000000000000000000000000000000"));
+		receipt_list.append(&H256::from("fff0000000000000000000000000000000000000000000000000000000000000"));
+		receipt_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000"));
+
+		let receipts_request = receipt_list.out();
+		// it returns rlp ONLY for hashes started with "f"
+		let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&receipts_request.clone()));
+
+		assert!(result.is_ok());
+		let rlp_result = result.unwrap();
+		assert!(rlp_result.is_some());
+
+		// the length of two rlp-encoded receipts
+		assert_eq!(597, rlp_result.unwrap().1.out().len());
+
+		let mut sync = ChainSync::new();
+		io.sender = Some(2usize);
+		sync.on_packet(&mut io, 1usize, super::GET_RECEIPTS_PACKET, &receipts_request);
+		assert_eq!(1, io.queue.len());
+	}
+
+	#[test]
+	fn return_nodes() {
+		let mut client = TestBlockChainClient::new();
+		let mut queue = VecDeque::new();
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		let mut node_list = RlpStream::new_list(3);
+		node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555"));
+		node_list.append(&H256::from("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa"));
+		node_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000"));
+
+		let node_request = node_list.out();
+		// it returns rlp ONLY for hashes started with "f"
+		let result = ChainSync::return_node_data(&io, &UntrustedRlp::new(&node_request.clone()));
+
+		assert!(result.is_ok());
+		let rlp_result = result.unwrap();
+		assert!(rlp_result.is_some());
+
+		// the length of one rlp-encoded hashe
+		assert_eq!(34, rlp_result.unwrap().1.out().len());
+
+		let mut sync = ChainSync::new();
+		io.sender = Some(2usize);
+		sync.on_packet(&mut io, 1usize, super::GET_NODE_DATA_PACKET, &node_request);
+		assert_eq!(1, io.queue.len());
+	}
+}
@@ -110,5 +110,3 @@ impl NetworkProtocolHandler<SyncMessage> for EthSync {
 		self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref()));
 	}
 }
-
-
sync/src/tests/chain.rs (new file, 91 lines)

@@ -0,0 +1,91 @@
+use util::*;
+use ethcore::client::{BlockChainClient};
+use io::SyncIo;
+use chain::{SyncState};
+use super::helpers::*;
+
+#[test]
+fn two_peers() {
+	::env_logger::init().ok();
+	let mut net = TestNet::new(3);
+	net.peer_mut(1).chain.add_blocks(1000, false);
+	net.peer_mut(2).chain.add_blocks(1000, false);
+	net.sync();
+	assert!(net.peer(0).chain.block_at(1000).is_some());
+	assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref());
+}
+
+#[test]
+fn status_after_sync() {
+	::env_logger::init().ok();
+	let mut net = TestNet::new(3);
+	net.peer_mut(1).chain.add_blocks(1000, false);
+	net.peer_mut(2).chain.add_blocks(1000, false);
+	net.sync();
+	let status = net.peer(0).sync.status();
+	assert_eq!(status.state, SyncState::Idle);
+}
+
+#[test]
+fn takes_few_steps() {
+	let mut net = TestNet::new(3);
+	net.peer_mut(1).chain.add_blocks(100, false);
+	net.peer_mut(2).chain.add_blocks(100, false);
+	let total_steps = net.sync();
+	assert!(total_steps < 7);
+}
+
+#[test]
+fn empty_blocks() {
+	::env_logger::init().ok();
+	let mut net = TestNet::new(3);
+	for n in 0..200 {
+		net.peer_mut(1).chain.add_blocks(5, n % 2 == 0);
+		net.peer_mut(2).chain.add_blocks(5, n % 2 == 0);
+	}
+	net.sync();
+	assert!(net.peer(0).chain.block_at(1000).is_some());
+	assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref());
+}
+
+#[test]
+fn forked() {
+	::env_logger::init().ok();
+	let mut net = TestNet::new(3);
+	net.peer_mut(0).chain.add_blocks(300, false);
+	net.peer_mut(1).chain.add_blocks(300, false);
+	net.peer_mut(2).chain.add_blocks(300, false);
+	net.peer_mut(0).chain.add_blocks(100, true); //fork
+	net.peer_mut(1).chain.add_blocks(200, false);
+	net.peer_mut(2).chain.add_blocks(200, false);
+	net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2
+	net.peer_mut(2).chain.add_blocks(10, true);
+	// peer 1 has the best chain of 601 blocks
+	let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone();
+	net.sync();
+	assert_eq!(net.peer(0).chain.numbers.read().unwrap().deref(), &peer1_chain);
+	assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain);
+	assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain);
+}
+
+#[test]
+fn restart() {
+	let mut net = TestNet::new(3);
+	net.peer_mut(1).chain.add_blocks(1000, false);
+	net.peer_mut(2).chain.add_blocks(1000, false);
+
+	net.sync_steps(8);
+
+	// make sure that sync has actually happened
+	assert!(net.peer(0).chain.chain_info().best_block_number > 100);
+	net.restart_peer(0);
+
+	let status = net.peer(0).sync.status();
+	assert_eq!(status.state, SyncState::NotSynced);
+}
+
+#[test]
+fn status_empty() {
+	let net = TestNet::new(2);
+	assert_eq!(net.peer(0).sync.status().state, SyncState::NotSynced);
+}
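These tests run entirely in memory: each TestNet peer owns a chain stub, a ChainSync instance and a packet queue, and sync()/sync_steps() pump queued packets between peers until the queues drain. A rough analogue of such a deterministic pump loop, with every name below being an illustrative stand-in rather than the helpers defined in sync/src/tests/helpers.rs:

// Rough sketch of a test-network pump: deliver queued messages round-robin
// until every queue is empty, counting the steps taken.
use std::collections::VecDeque;

struct Peer {
    inbox: VecDeque<String>,
    log: Vec<String>,
}

struct Net {
    peers: Vec<Peer>,
}

impl Net {
    fn sync(&mut self) -> usize {
        let mut steps = 0;
        while self.peers.iter().any(|p| !p.inbox.is_empty()) {
            for i in 0..self.peers.len() {
                if let Some(msg) = self.peers[i].inbox.pop_front() {
                    // A real peer would decode the packet and possibly queue
                    // replies for other peers; here we just record it.
                    self.peers[i].log.push(msg);
                }
            }
            steps += 1;
        }
        steps
    }
}

fn main() {
    let mut net = Net {
        peers: vec![
            Peer { inbox: VecDeque::from(vec!["block 1".to_string(), "block 2".to_string()]), log: vec![] },
            Peer { inbox: VecDeque::new(), log: vec![] },
        ],
    };
    let steps = net.sync();
    assert_eq!(steps, 2);
    assert_eq!(net.peers[0].log.len(), 2);
}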
@@ -4,18 +4,19 @@ use ethcore::block_queue::BlockQueueInfo;
 use ethcore::header::{Header as BlockHeader, BlockNumber};
 use ethcore::error::*;
 use io::SyncIo;
-use chain::{ChainSync, SyncState};
+use chain::{ChainSync};
+use ethcore::receipt::Receipt;
 
-struct TestBlockChainClient {
-	blocks: RwLock<HashMap<H256, Bytes>>,
-	numbers: RwLock<HashMap<usize, H256>>,
-	genesis_hash: H256,
-	last_hash: RwLock<H256>,
-	difficulty: RwLock<U256>,
+pub struct TestBlockChainClient {
+	pub blocks: RwLock<HashMap<H256, Bytes>>,
+	pub numbers: RwLock<HashMap<usize, H256>>,
+	pub genesis_hash: H256,
+	pub last_hash: RwLock<H256>,
+	pub difficulty: RwLock<U256>,
 }
 
 impl TestBlockChainClient {
-	fn new() -> TestBlockChainClient {
+	pub fn new() -> TestBlockChainClient {
 
 		let mut client = TestBlockChainClient {
 			blocks: RwLock::new(HashMap::new()),
@@ -116,11 +117,28 @@ impl BlockChainClient for TestBlockChainClient {
 		})
 	}
 
-	fn state_data(&self, _h: &H256) -> Option<Bytes> {
+	// TODO: returns just hashes instead of node state rlp(?)
+	fn state_data(&self, hash: &H256) -> Option<Bytes> {
+		// starts with 'f' ?
+		if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
+			let mut rlp = RlpStream::new();
+			rlp.append(&hash.clone());
+			return Some(rlp.out());
+		}
 		None
 	}
 
-	fn block_receipts(&self, _h: &H256) -> Option<Bytes> {
+	fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
+		// starts with 'f' ?
+		if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
+			let receipt = Receipt::new(
+				H256::zero(),
+				U256::zero(),
+				vec![]);
+			let mut rlp = RlpStream::new();
+			rlp.append(&receipt);
+			return Some(rlp.out());
+		}
 		None
 	}
 
@@ -189,14 +207,14 @@ impl BlockChainClient for TestBlockChainClient {
 	}
 }
 
-struct TestIo<'p> {
-	chain: &'p mut TestBlockChainClient,
-	queue: &'p mut VecDeque<TestPacket>,
-	sender: Option<PeerId>,
+pub struct TestIo<'p> {
+	pub chain: &'p mut TestBlockChainClient,
+	pub queue: &'p mut VecDeque<TestPacket>,
+	pub sender: Option<PeerId>,
 }
 
 impl<'p> TestIo<'p> {
-	fn new(chain: &'p mut TestBlockChainClient, queue: &'p mut VecDeque<TestPacket>, sender: Option<PeerId>) -> TestIo<'p> {
+	pub fn new(chain: &'p mut TestBlockChainClient, queue: &'p mut VecDeque<TestPacket>, sender: Option<PeerId>) -> TestIo<'p> {
 		TestIo {
 			chain: chain,
 			queue: queue,
@@ -235,21 +253,21 @@ impl<'p> SyncIo for TestIo<'p> {
 	}
 }
 
-struct TestPacket {
-	data: Bytes,
-	packet_id: PacketId,
-	recipient: PeerId,
+pub struct TestPacket {
+	pub data: Bytes,
+	pub packet_id: PacketId,
+	pub recipient: PeerId,
 }
 
-struct TestPeer {
-	chain: TestBlockChainClient,
-	sync: ChainSync,
-	queue: VecDeque<TestPacket>,
+pub struct TestPeer {
+	pub chain: TestBlockChainClient,
+	pub sync: ChainSync,
+	pub queue: VecDeque<TestPacket>,
 }
 
-struct TestNet {
-	peers: Vec<TestPeer>,
-	started: bool,
+pub struct TestNet {
+	pub peers: Vec<TestPeer>,
+	pub started: bool,
 }
 
 impl TestNet {
@@ -329,89 +347,3 @@ impl TestNet {
 		self.peers.iter().all(|p| p.queue.is_empty())
 	}
 }
-
-#[test]
-fn chain_two_peers() {
-	::env_logger::init().ok();
-	let mut net = TestNet::new(3);
-	net.peer_mut(1).chain.add_blocks(1000, false);
-	net.peer_mut(2).chain.add_blocks(1000, false);
-	net.sync();
-	assert!(net.peer(0).chain.block_at(1000).is_some());
-	assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref());
-}
-
-#[test]
-fn chain_status_after_sync() {
-	::env_logger::init().ok();
-	let mut net = TestNet::new(3);
-	net.peer_mut(1).chain.add_blocks(1000, false);
-	net.peer_mut(2).chain.add_blocks(1000, false);
-	net.sync();
-	let status = net.peer(0).sync.status();
-	assert_eq!(status.state, SyncState::Idle);
-}
-
-#[test]
-fn chain_takes_few_steps() {
-	let mut net = TestNet::new(3);
-	net.peer_mut(1).chain.add_blocks(100, false);
-	net.peer_mut(2).chain.add_blocks(100, false);
-	let total_steps = net.sync();
-	assert!(total_steps < 7);
-}
-
-#[test]
-fn chain_empty_blocks() {
-	::env_logger::init().ok();
-	let mut net = TestNet::new(3);
-	for n in 0..200 {
-		net.peer_mut(1).chain.add_blocks(5, n % 2 == 0);
-		net.peer_mut(2).chain.add_blocks(5, n % 2 == 0);
-	}
-	net.sync();
-	assert!(net.peer(0).chain.block_at(1000).is_some());
-	assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref());
-}
-
-#[test]
-fn chain_forked() {
-	::env_logger::init().ok();
-	let mut net = TestNet::new(3);
-	net.peer_mut(0).chain.add_blocks(300, false);
-	net.peer_mut(1).chain.add_blocks(300, false);
-	net.peer_mut(2).chain.add_blocks(300, false);
-	net.peer_mut(0).chain.add_blocks(100, true); //fork
-	net.peer_mut(1).chain.add_blocks(200, false);
-	net.peer_mut(2).chain.add_blocks(200, false);
-	net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2
-	net.peer_mut(2).chain.add_blocks(10, true);
-	// peer 1 has the best chain of 601 blocks
-	let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone();
-	net.sync();
-	assert_eq!(net.peer(0).chain.numbers.read().unwrap().deref(), &peer1_chain);
-	assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain);
-	assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain);
-}
-
-#[test]
-fn chain_restart() {
-	let mut net = TestNet::new(3);
-	net.peer_mut(1).chain.add_blocks(1000, false);
-	net.peer_mut(2).chain.add_blocks(1000, false);
-
-	net.sync_steps(8);
-
-	// make sure that sync has actually happened
-	assert!(net.peer(0).chain.chain_info().best_block_number > 100);
-	net.restart_peer(0);
-
-	let status = net.peer(0).sync.status();
-	assert_eq!(status.state, SyncState::NotSynced);
-}
-
-#[test]
-fn chain_status_empty() {
-	let net = TestNet::new(2);
-	assert_eq!(net.peer(0).sync.status().state, SyncState::NotSynced);
-}
sync/src/tests/mod.rs (new file, 2 lines)

@@ -0,0 +1,2 @@
+pub mod helpers;
+mod chain;