Merge branch 'master' into issues/4673

Commit 0ec917e980 by Joseph Mark, 2017-07-21 19:47:14 +07:00
GPG Key ID: 9BA8D1EE2E53C283 (no known key found for this signature in database)
38 changed files with 189 additions and 3477 deletions


@@ -561,11 +561,9 @@ docker-build:
     - docker info
   script:
     - if [ "$CI_BUILD_REF_NAME" == "beta-release" ]; then DOCKER_TAG="latest"; else DOCKER_TAG=$CI_BUILD_REF_NAME; fi
-    - docker login -u $Docker_Hub_User -p $Docker_Hub_Pass
-    - sh scripts/docker-build.sh $DOCKER_TAG ethcore
-    - docker logout
+    - echo "Tag:" $DOCKER_TAG
     - docker login -u $Docker_Hub_User_Parity -p $Docker_Hub_Pass_Parity
-    - sh scripts/docker-build.sh $DOCKER_TAG parity
+    - sh scripts/docker-build.sh $DOCKER_TAG
     - docker logout
   tags:
     - docker
@@ -613,11 +611,12 @@ test-rust-stable:
   image: parity/rust:gitlab-ci
   before_script:
     - git submodule update --init --recursive
-    - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l)
+    - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l)
   script:
     - rustup show
     - export RUST_BACKTRACE=1
     - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
+    - if [ "$CI_BUILD_REF_NAME" == "nightly" ]; then sh scripts/aura-test.sh; fi
   tags:
     - rust
     - rust-stable

Cargo.lock (generated, 3426 lines changed)

File diff suppressed because it is too large.


@@ -53,6 +53,7 @@ parity-rpc-client = { path = "rpc_client" }
 parity-updater = { path = "updater" }
 parity-whisper = { path = "whisper" }
 path = { path = "util/path" }
+panic_hook = { path = "panic_hook" }
 parity-dapps = { path = "dapps", optional = true }
 clippy = { version = "0.0.103", optional = true}


@@ -4,7 +4,7 @@ WORKDIR /build
 #ENV for build TAG
 ARG BUILD_TAG
 ENV BUILD_TAG ${BUILD_TAG:-master}
-RUN echo $BUILD_TAG
+RUN echo "Build tag:" $BUILD_TAG
 # install tools and dependencies
 RUN apt-get update && \
 	apt-get install -y --force-yes --no-install-recommends \
@@ -48,7 +48,7 @@ RUN apt-get update && \
 	# show backtraces
 	RUST_BACKTRACE=1 && \
 	# build parity
-	cd /build&&git clone https://github.com/paritytech/parity && \
+	cd /build&&git clone https://github.com/paritytech/parity && \
 	cd parity && \
 	git pull&& \
 	git checkout $BUILD_TAG && \


@@ -211,6 +211,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 		if (instruction == instructions::DELEGATECALL && !schedule.have_delegate_call) ||
 			(instruction == instructions::CREATE2 && !schedule.have_create2) ||
 			(instruction == instructions::STATICCALL && !schedule.have_static_call) ||
+			((instruction == instructions::RETURNDATACOPY || instruction == instructions::RETURNDATASIZE) && !schedule.have_return_data) ||
 			(instruction == instructions::REVERT && !schedule.have_revert) {
 			return Err(evm::Error::BadInstruction {


@@ -107,6 +107,8 @@ pub struct Schedule {
 	pub blockhash_gas: usize,
 	/// Static Call opcode enabled.
 	pub have_static_call: bool,
+	/// RETURNDATA and RETURNDATASIZE opcodes enabled.
+	pub have_return_data: bool,
 	/// Kill basic accounts below this balance if touched.
 	pub kill_dust: CleanDustMode,
 }
@@ -140,6 +142,7 @@ impl Schedule {
 			have_delegate_call: true,
 			have_create2: false,
 			have_revert: false,
+			have_return_data: false,
 			stack_limit: 1024,
 			max_depth: 1024,
 			tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
@@ -190,6 +193,7 @@ impl Schedule {
 		schedule.have_create2 = true;
 		schedule.have_revert = true;
 		schedule.have_static_call = true;
+		schedule.have_return_data = true;
 		schedule.blockhash_gas = 350;
 		schedule
 	}
@@ -200,6 +204,7 @@ impl Schedule {
 			have_delegate_call: hdc,
 			have_create2: false,
 			have_revert: false,
+			have_return_data: false,
 			stack_limit: 1024,
 			max_depth: 1024,
 			tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],

@@ -1 +1 @@
-Subproject commit 4e8b9be3fba16ec32e0cdf50b8f9329826283aaa
+Subproject commit ef191fdc61cf76cdb9cdc147465fb447304b0ed2


@@ -558,9 +558,9 @@ impl SnapshotService for Service {
 		self.reader.read().as_ref().map(|r| r.manifest().clone())
 	}

-	fn min_supported_version(&self) -> Option<u64> {
+	fn supported_versions(&self) -> Option<(u64, u64)> {
 		self.engine.snapshot_components()
-			.map(|c| c.min_supported_version())
+			.map(|c| (c.min_supported_version(), c.current_version()))
 	}

 	fn chunk(&self, hash: H256) -> Option<Bytes> {


@@ -27,9 +27,9 @@ pub trait SnapshotService : Sync + Send {
 	/// Query the most recent manifest data.
 	fn manifest(&self) -> Option<ManifestData>;

-	/// Get the minimum supported snapshot version number.
+	/// Get the supported range of snapshot version numbers.
 	/// `None` indicates warp sync isn't supported by the consensus engine.
-	fn min_supported_version(&self) -> Option<u64>;
+	fn supported_versions(&self) -> Option<(u64, u64)>;

 	/// Get raw chunk for a given hash.
 	fn chunk(&self, hash: H256) -> Option<Bytes>;
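
For callers, the switch from a single minimum to a (min, max) pair turns the old one-sided test into a range check; the sync-side counterpart appears further down in this diff. A self-contained sketch of the consuming logic (names assumed for illustration, not parity's exact code):

fn version_supported(supported: Option<(u64, u64)>, manifest_version: u64) -> bool {
    // `None` still means "warp sync unsupported", as documented above.
    supported.map_or(false, |(lo, hi)| manifest_version >= lo && manifest_version <= hi)
}

fn main() {
    assert!(version_supported(Some((1, 2)), 2));   // inside the range
    assert!(!version_supported(Some((1, 2)), 3));  // too-new manifests are now rejected too
    assert!(!version_supported(None, 1));          // engine without warp sync
}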


@@ -100,6 +100,7 @@ impl CommonParams {
 		schedule.have_create2 = block_number >= self.eip86_transition;
 		schedule.have_revert = block_number >= self.eip140_transition;
 		schedule.have_static_call = block_number >= self.eip214_transition;
+		schedule.have_return_data = block_number >= self.eip211_transition;
 		if block_number >= self.eip210_transition {
 			schedule.blockhash_gas = 350;
 		}
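
The pattern above is uniform: every fork feature is a plain block-number comparison against a per-chain transition constant. A minimal self-contained sketch of the same pattern (struct names and the activation number are illustrative stand-ins, not values from the parity source):

struct Params { eip211_transition: u64 }
struct Schedule { have_return_data: bool }

fn schedule_for(params: &Params, block_number: u64) -> Schedule {
    Schedule {
        // EIP-211 (RETURNDATASIZE/RETURNDATACOPY) switches on at the
        // configured transition block, exactly like the flags above.
        have_return_data: block_number >= params.eip211_transition,
    }
}

fn main() {
    let params = Params { eip211_transition: 4_370_000 }; // example value only
    assert!(!schedule_for(&params, 4_369_999).have_return_data);
    assert!(schedule_for(&params, 4_370_000).have_return_data);
}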


@@ -9,6 +9,7 @@ serde = "1.0"
 serde_derive = "1.0"
 rustc-hex = "1.0"
 docopt = "0.8"
+panic_hook = { path = "../../panic_hook" }

 [[bin]]
 name = "ethkey"


@@ -20,12 +20,14 @@ extern crate serde;
 #[macro_use]
 extern crate serde_derive;
 extern crate ethkey;
+extern crate panic_hook;

 use std::{env, fmt, process};
 use std::num::ParseIntError;
 use docopt::Docopt;
 use rustc_hex::{FromHex, FromHexError};
 use ethkey::{KeyPair, Random, Brain, Prefix, Error as EthkeyError, Generator, sign, verify_public, verify_address};
+use std::io;

 pub const USAGE: &'static str = r#"
 Ethereum keys generator.
@@ -87,6 +89,7 @@ enum Error {
 	FromHex(FromHexError),
 	ParseInt(ParseIntError),
 	Docopt(docopt::Error),
+	Io(io::Error),
 }

 impl From<EthkeyError> for Error {
@@ -113,6 +116,12 @@ impl From<docopt::Error> for Error {
 	}
 }

+impl From<io::Error> for Error {
+	fn from(err: io::Error) -> Self {
+		Error::Io(err)
+	}
+}
+
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
 		match *self {
@@ -120,6 +129,7 @@ impl fmt::Display for Error {
 			Error::FromHex(ref e) => write!(f, "{}", e),
 			Error::ParseInt(ref e) => write!(f, "{}", e),
 			Error::Docopt(ref e) => write!(f, "{}", e),
+			Error::Io(ref e) => write!(f, "{}", e),
 		}
 	}
 }
@@ -146,6 +156,8 @@ impl DisplayMode {
 }

 fn main() {
+	panic_hook::set();
+
 	match execute(env::args()) {
 		Ok(ok) => println!("{}", ok),
 		Err(err) => {
@@ -176,17 +188,17 @@ fn execute<S, I>(command: I) -> Result<String, Error> where I: IntoIterator<Item
 	} else if args.cmd_generate {
 		let display_mode = DisplayMode::new(&args);
 		let keypair = if args.cmd_random {
-			Random.generate()
+			Random.generate()?
 		} else if args.cmd_prefix {
 			let prefix = args.arg_prefix.from_hex()?;
 			let iterations = usize::from_str_radix(&args.arg_iterations, 10)?;
-			Prefix::new(prefix, iterations).generate()
+			Prefix::new(prefix, iterations).generate()?
 		} else if args.cmd_brain {
-			Brain::new(args.arg_seed).generate()
+			Brain::new(args.arg_seed).generate().expect("Brain wallet generator is infallible; qed")
 		} else {
 			unreachable!();
 		};
-		Ok(display(keypair?, display_mode))
+		Ok(display(keypair, display_mode))
 	} else if args.cmd_sign {
 		let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?;
 		let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?;
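
The new Io variant plus the From<io::Error> impl is what lets `?` propagate IO failures inside execute as the tool's own error type. A standalone illustration of that conversion pattern (the read_file helper is hypothetical, not from the commit):

use std::fs::File;
use std::io::{self, Read};

#[derive(Debug)]
enum Error { Io(io::Error) }

impl From<io::Error> for Error {
    fn from(err: io::Error) -> Self { Error::Io(err) }
}

// `?` converts the io::Error into Error::Io via the From impl above.
fn read_file(path: &str) -> Result<String, Error> {
    let mut buf = String::new();
    File::open(path)?.read_to_string(&mut buf)?;
    Ok(buf)
}

fn main() {
    match read_file("/no/such/file") {
        Ok(s) => println!("{}", s),
        Err(Error::Io(e)) => eprintln!("io error: {}", e),
    }
}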


@@ -9,6 +9,7 @@ serde = "1.0"
 serde_derive = "1.0"
 docopt = "0.8"
 ethstore = { path = "../" }
+panic_hook = { path = "../../panic_hook" }

 [[bin]]
 name = "ethstore"


@@ -20,6 +20,7 @@ extern crate serde;
 #[macro_use]
 extern crate serde_derive;
 extern crate ethstore;
+extern crate panic_hook;

 use std::{env, process, fs, fmt};
 use std::io::Read;
@@ -134,6 +135,8 @@ impl fmt::Display for Error {
 }

 fn main() {
+	panic_hook::set();
+
 	match execute(env::args()) {
 		Ok(result) => println!("{}", result),
 		Err(err) => {


@@ -16,6 +16,7 @@ serde_derive = "1.0"
 ethcore = { path = "../ethcore" }
 ethcore-util = { path = "../util" }
 evm = { path = "../ethcore/evm" }
+panic_hook = { path = "../panic_hook" }

 [features]
 evm-debug = ["ethcore/evm-debug-tests"]


@@ -26,6 +26,7 @@ extern crate serde_derive;
 extern crate docopt;
 extern crate ethcore_util as util;
 extern crate evm;
+extern crate panic_hook;

 use std::sync::Arc;
 use std::{fmt, fs};
@@ -63,6 +64,8 @@ General options:
 fn main() {
+	panic_hook::set();
+
 	let args: Args = Docopt::new(USAGE).and_then(|d| d.deserialize()).unwrap_or_else(|e| e.exit());

 	if args.flag_json {


@@ -1,6 +1,6 @@
 {
   "name": "parity.js",
-  "version": "1.7.101",
+  "version": "1.8.3",
   "main": "release/index.js",
   "jsnext:main": "src/index.js",
   "author": "Parity Team <admin@parity.io>",

File diff suppressed because one or more lines are too long


@@ -104,7 +104,7 @@ contract WalletLibrary is WalletEvents {
 	// constructor is given number of sigs required to do protected "onlymanyowners" transactions
 	// as well as the selection of addresses capable of confirming them.
-	function initMultiowned(address[] _owners, uint _required) {
+	function initMultiowned(address[] _owners, uint _required) only_uninitialized {
 		m_numOwners = _owners.length + 1;
 		m_owners[1] = uint(msg.sender);
 		m_ownerIndex[uint(msg.sender)] = 1;
@@ -198,7 +198,7 @@ contract WalletLibrary is WalletEvents {
 	}

 	// constructor - stores initial daily limit and records the present day's index.
-	function initDaylimit(uint _limit) {
+	function initDaylimit(uint _limit) only_uninitialized {
 		m_dailyLimit = _limit;
 		m_lastDay = today();
 	}
@@ -211,9 +211,12 @@ contract WalletLibrary is WalletEvents {
 		m_spentToday = 0;
 	}

+	// throw unless the contract is not yet initialized.
+	modifier only_uninitialized { if (m_numOwners > 0) throw; _; }
+
 	// constructor - just pass on the owner array to the multiowned and
 	// the limit to daylimit
-	function initWallet(address[] _owners, uint _required, uint _daylimit) {
+	function initWallet(address[] _owners, uint _required, uint _daylimit) only_uninitialized {
 		initDaylimit(_daylimit);
 		initMultiowned(_owners, _required);
 	}
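
The fix itself is one line of guard logic: every initializer now throws unless the wallet has no owners yet, so the library (and any wallet delegating to it) can only be initialized once. The same guard expressed as a self-contained Rust sketch (illustrative only; obviously not the Solidity source):

struct WalletLibrary { m_num_owners: usize }

impl WalletLibrary {
    // Counterpart of `only_uninitialized`: refuse once owners exist.
    fn only_uninitialized(&self) -> Result<(), &'static str> {
        if self.m_num_owners > 0 { return Err("already initialized"); }
        Ok(())
    }

    fn init_multiowned(&mut self, owners: &[u64]) -> Result<(), &'static str> {
        self.only_uninitialized()?;
        self.m_num_owners = owners.len() + 1;
        Ok(())
    }
}

fn main() {
    let mut wallet = WalletLibrary { m_num_owners: 0 };
    assert!(wallet.init_multiowned(&[1, 2]).is_ok());
    assert!(wallet.init_multiowned(&[9]).is_err()); // second call now rejected
}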


@@ -191,6 +191,8 @@ export default class CreateWalletStore {
 			return null; // exception when registry is not available
 		})
 		.then((address) => {
+			console.warn('WalletLibrary address in registry', address);
+
 			if (!address || /^(0x)?0*$/.test(address)) {
 				return null;
 			}

panic_hook/Cargo.toml (new file, 10 lines)

@@ -0,0 +1,10 @@
[package]
description = "Parity custom panic hook"
homepage = "http://parity.io"
license = "GPL-3.0"
name = "panic_hook"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
backtrace = "0.3.2"

panic_hook/src/lib.rs (new file, 69 lines)

@@ -0,0 +1,69 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Custom panic hook with bug report link

extern crate backtrace;

use backtrace::Backtrace;
use std::io::{self, Write};
use std::panic::{self, PanicInfo};
use std::thread;

/// Set the panic hook
pub fn set() {
	panic::set_hook(Box::new(panic_hook));
}

static ABOUT_PANIC: &str = "
This is a bug. Please report it at:
https://github.com/paritytech/parity/issues/new
";

fn panic_hook(info: &PanicInfo) {
	let location = info.location();
	let file = location.as_ref().map(|l| l.file()).unwrap_or("<unknown>");
	let line = location.as_ref().map(|l| l.line()).unwrap_or(0);

	let msg = match info.payload().downcast_ref::<&'static str>() {
		Some(s) => *s,
		None => match info.payload().downcast_ref::<String>() {
			Some(s) => &s[..],
			None => "Box<Any>",
		}
	};

	let thread = thread::current();
	let name = thread.name().unwrap_or("<unnamed>");

	let backtrace = Backtrace::new();

	let mut stderr = io::stderr();

	let _ = writeln!(stderr, "");
	let _ = writeln!(stderr, "====================");
	let _ = writeln!(stderr, "");
	let _ = writeln!(stderr, "{:?}", backtrace);
	let _ = writeln!(stderr, "");
	let _ = writeln!(
		stderr,
		"Thread '{}' panicked at '{}', {}:{}",
		name, msg, file, line
	);
	let _ = writeln!(stderr, "{}", ABOUT_PANIC);
}
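
Usage is a single call at the top of main, as the binary crates elsewhere in this commit do; after that, any panic on any thread prints the backtrace, panic location, and the bug-report link to stderr. A minimal consumer, assuming the crate is wired up as a path dependency like in the Cargo.toml changes above:

extern crate panic_hook;

fn main() {
    panic_hook::set(); // install before doing anything that can panic
    // ... real work; any panic past this point goes through panic_hook above.
}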


@@ -531,10 +531,12 @@ pub fn kill_db(cmd: KillBlockchain) -> Result<(), String> {
 	let genesis_hash = spec.genesis_header().hash();
 	let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir);
 	let user_defaults_path = db_dirs.user_defaults_path();
-	let user_defaults = UserDefaults::load(&user_defaults_path)?;
+	let mut user_defaults = UserDefaults::load(&user_defaults_path)?;
 	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 	let dir = db_dirs.db_path(algorithm);
 	fs::remove_dir_all(&dir).map_err(|e| format!("Error removing database: {:?}", e))?;
+	user_defaults.is_first_launch = true;
+	user_defaults.save(&user_defaults_path)?;
 	info!("Database deleted.");
 	Ok(())
 }


@@ -152,7 +152,7 @@ impl InformantData for FullNodeInformantData {
 				max_peers: status.current_max_peers(net_config.min_peers, net_config.max_peers),
 			}))
 		}
-		_ => (is_major_importing(None, queue_info.clone()), None),
+		_ => (is_major_importing(self.sync.as_ref().map(|s| s.status().state), queue_info.clone()), None),
 	};

 	Report {
@@ -254,8 +254,6 @@ impl<T: InformantData> Informant<T> {
 			return;
 		}

-		*self.last_tick.write() = Instant::now();
-
 		let (client_report, full_report) = {
 			let mut last_report = self.last_report.lock();
 			let full_report = self.target.report();
@@ -287,6 +285,8 @@ impl<T: InformantData> Informant<T> {
 			return;
 		}

+		*self.last_tick.write() = Instant::now();
+
 		let paint = |c: Style, t: String| match self.with_color && stdout_isatty() {
 			true => format!("{}", c.paint(t)),
 			false => t,


@@ -57,6 +57,7 @@ extern crate ethcore_logger;
 extern crate ethcore_util as util;
 extern crate ethkey;
 extern crate ethsync;
+extern crate panic_hook;
 extern crate parity_hash_fetch as hash_fetch;
 extern crate parity_ipfs_api;
 extern crate parity_local_store as local_store;
@@ -315,8 +316,7 @@ macro_rules! trace_main {
 }

 fn main() {
-	// Always print backtrace on panic.
-	env::set_var("RUST_BACKTRACE", "1");
+	panic_hook::set();

 	// assuming the user is not running with `--force-direct`, then:
 	// if argv[0] == "parity" and this executable != ~/.parity-updates/parity, run that instead.


@@ -754,6 +754,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
 	service.register_io_handler(informant.clone()).map_err(|_| "Unable to register informant handler".to_owned())?;

 	// save user defaults
 	user_defaults.is_first_launch = false;
+	user_defaults.pruning = algorithm;
 	user_defaults.tracing = tracing;
 	user_defaults.fat_db = fat_db;


@@ -41,6 +41,7 @@ impl Serialize for UserDefaults {
 	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
 	where S: Serializer {
 		let mut map: BTreeMap<String, Value> = BTreeMap::new();
+		map.insert("is_first_launch".into(), Value::Bool(self.is_first_launch));
 		map.insert("pruning".into(), Value::String(self.pruning.as_str().into()));
 		map.insert("tracing".into(), Value::Bool(self.tracing));
 		map.insert("fat_db".into(), Value::Bool(self.fat_db));


@@ -21,9 +21,10 @@ use ethsync::SyncState;

 /// Check if client is during major sync or during block import.
 pub fn is_major_importing(sync_state: Option<SyncState>, queue_info: BlockQueueInfo) -> bool {
-	let is_syncing_state = sync_state.map_or(false, |s|
-		s != SyncState::Idle && s != SyncState::NewBlocks
-	);
+	let is_syncing_state = sync_state.map_or(false, |s| match s {
+		SyncState::Idle | SyncState::NewBlocks | SyncState::WaitingPeers => false,
+		_ => true,
+	});
 	let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3;
 	is_verifying || is_syncing_state
 }
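
The visible effect: a node that is merely waiting for warp-sync peers no longer reports itself as doing a major import. A self-contained sketch of the old vs. new predicate (local stand-in for SyncState; the queue-size half of the check is omitted):

#[derive(Clone, Copy, PartialEq)]
enum SyncState { Idle, NewBlocks, WaitingPeers, Blocks }

fn syncing_old(s: SyncState) -> bool {
    s != SyncState::Idle && s != SyncState::NewBlocks
}

fn syncing_new(s: SyncState) -> bool {
    match s {
        SyncState::Idle | SyncState::NewBlocks | SyncState::WaitingPeers => false,
        _ => true,
    }
}

fn main() {
    assert!(syncing_old(SyncState::WaitingPeers));  // old: counted as major sync
    assert!(!syncing_new(SyncState::WaitingPeers)); // new: it no longer does
    assert!(syncing_new(SyncState::Blocks));        // genuine sync still counts
}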


@@ -41,7 +41,7 @@ impl TestSnapshotService {
 impl SnapshotService for TestSnapshotService {
 	fn manifest(&self) -> Option<ManifestData> { None }
-	fn min_supported_version(&self) -> Option<u64> { None }
+	fn supported_versions(&self) -> Option<(u64, u64)> { None }
 	fn chunk(&self, _hash: H256) -> Option<Bytes> { None }
 	fn status(&self) -> RestorationStatus { self.status.lock().clone() }
 	fn begin_restore(&self, _manifest: ManifestData) { }

scripts/aura-test.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/bin/bash
cargo build -j $(nproc) --release --features final $CARGOFLAGS
git clone https://github.com/paritytech/parity-import-tests
cp target/release/parity parity-import-tests/aura/parity
cd parity-import-tests/aura
echo "Start Aura test"
parity import blocks.rlp --chain chain.json
parity restore snap --chain chain.json
echo "Aura test complete"

scripts/deb-build.sh (mode change: Normal file → Executable file)

scripts/deploy.sh (mode change: Normal file → Executable file)

scripts/docker-build.sh (Normal file → Executable file, 8 lines changed)

@@ -1,5 +1,7 @@
 #!/bin/bash
 cd docker/hub
-if [ "$1" == "latest" ]; then DOCKER_BUILD_TAG="beta-release"; fi
-docker build --build-arg BUILD_TAG=$DOCKER_BUILD_TAG --no-cache=true --tag $2/parity:$1 .
-docker push $2/parity:$1
+DOCKER_BUILD_TAG=$1
+echo "Docker build tag: " $DOCKER_BUILD_TAG
+docker build --build-arg BUILD_TAG=$DOCKER_BUILD_TAG --no-cache=true --tag parity/parity:$DOCKER_BUILD_TAG .
+docker run -it parity/parity:$DOCKER_BUILD_TAG -v
+docker push parity/parity:$DOCKER_BUILD_TAG

scripts/targets.sh (mode change: Normal file → Executable file)


@@ -504,7 +504,7 @@ impl ChainSync {
 	}

 	fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) {
-		if !self.enable_warp_sync || io.snapshot_service().min_supported_version().is_none() {
+		if !self.enable_warp_sync || io.snapshot_service().supported_versions().is_none() {
 			return;
 		}
 		if self.state != SyncState::WaitingPeers && self.state != SyncState::Blocks && self.state != SyncState::Waiting {
@@ -1044,11 +1044,11 @@ impl ChainSync {
 			Ok(manifest) => manifest,
 		};

-		let is_supported_version = io.snapshot_service().min_supported_version()
-			.map_or(false, |v| manifest.version >= v);
+		let is_supported_version = io.snapshot_service().supported_versions()
+			.map_or(false, |(l, h)| manifest.version >= l && manifest.version <= h);

 		if !is_supported_version {
-			trace!(target: "sync", "{}: Snapshot manifest version too low: {}", peer_id, manifest.version);
+			trace!(target: "sync", "{}: Snapshot manifest version not supported: {}", peer_id, manifest.version);
 			io.disable_peer(peer_id);
 			self.continue_sync(io);
 			return Ok(());


@@ -71,8 +71,8 @@ impl SnapshotService for TestSnapshotService {
 		self.manifest.as_ref().cloned()
 	}

-	fn min_supported_version(&self) -> Option<u64> {
-		Some(1)
+	fn supported_versions(&self) -> Option<(u64, u64)> {
+		Some((1, 2))
 	}

 	fn chunk(&self, hash: H256) -> Option<Bytes> {


@@ -857,14 +857,19 @@ impl Host {
 		// Add it to the node table
 		if !s.info.originated {
 			if let Ok(address) = s.remote_addr() {
-				let entry = NodeEntry { id: id, endpoint: NodeEndpoint { address: address, udp_port: address.port() } };
-				self.nodes.write().add_node(Node::new(entry.id.clone(), entry.endpoint.clone()));
+				// We can't know remote listening ports, so just assume defaults and hope for the best.
+				let endpoint = NodeEndpoint { address: SocketAddr::new(address.ip(), DEFAULT_PORT), udp_port: DEFAULT_PORT };
+				let entry = NodeEntry { id: id, endpoint: endpoint };
+				let mut nodes = self.nodes.write();
+				if !nodes.contains(&entry.id) {
+					nodes.add_node(Node::new(entry.id.clone(), entry.endpoint.clone()));
 					let mut discovery = self.discovery.lock();
 					if let Some(ref mut discovery) = *discovery {
 						discovery.add_node(entry);
 					}
+				}
 			}
 		}
 		for (p, _) in self.handlers.read().iter() {
 			if s.have_capability(*p) {
 				ready_data.push(*p);


@@ -236,6 +236,11 @@ impl NodeTable {
 		self.nodes.get_mut(id)
 	}

+	/// Check if a node exists in the table.
+	pub fn contains(&self, id: &NodeId) -> bool {
+		self.nodes.contains_key(id)
+	}
+
 	/// Apply table changes coming from discovery
 	pub fn update(&mut self, mut update: TableUpdates, reserved: &HashSet<NodeId>) {
 		for (_, node) in update.added.drain() {
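
contains() exists so the host-side code above can skip peers it already knows instead of re-inserting them and re-announcing them to discovery on every inbound connection. The dedup pattern in miniature (a HashMap stands in for the node table here):

use std::collections::HashMap;

// Returns true only the first time an id is seen, like the new
// `if !nodes.contains(&entry.id)` guard in Host above.
fn add_if_new(table: &mut HashMap<u64, String>, id: u64, endpoint: &str) -> bool {
    if table.contains_key(&id) {
        return false; // known node: no table write, no discovery ping
    }
    table.insert(id, endpoint.to_string());
    true
}

fn main() {
    let mut table = HashMap::new();
    assert!(add_if_new(&mut table, 7, "10.0.0.1:30303"));
    assert!(!add_if_new(&mut table, 7, "10.0.0.1:30303")); // duplicate skipped
}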