Merge branch 'master' into ui-2
commit 3d24ae0981
@@ -48,7 +48,7 @@ pub trait Fetcher: Send + Sync + 'static {
 }
 
 pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHintContract> {
-	dapps_path: PathBuf,
+	cache_path: PathBuf,
 	resolver: R,
 	cache: Arc<Mutex<ContentCache>>,
 	sync: Arc<SyncStatus>,
@@ -61,7 +61,7 @@ pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHint
 impl<R: URLHint + 'static, F: Fetch> Drop for ContentFetcher<F, R> {
 	fn drop(&mut self) {
 		// Clear cache path
-		let _ = fs::remove_dir_all(&self.dapps_path);
+		let _ = fs::remove_dir_all(&self.cache_path);
 	}
 }
 
@@ -73,11 +73,11 @@ impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> {
 		remote: Remote,
 		fetch: F,
 	) -> Self {
-		let mut dapps_path = env::temp_dir();
-		dapps_path.push(random_filename());
+		let mut cache_path = env::temp_dir();
+		cache_path.push(random_filename());
 
 		ContentFetcher {
-			dapps_path: dapps_path,
+			cache_path: cache_path,
 			resolver: resolver,
 			sync: sync_status,
 			cache: Arc::new(Mutex::new(ContentCache::default())),
@@ -200,7 +200,7 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
 					control,
 					installers::Dapp::new(
 						content_id.clone(),
-						self.dapps_path.clone(),
+						self.cache_path.clone(),
 						Box::new(on_done),
 						self.embeddable_on.clone(),
 					),
@@ -219,7 +219,7 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
 					installers::Content::new(
 						content_id.clone(),
 						content.mime,
-						self.dapps_path.clone(),
+						self.cache_path.clone(),
 						Box::new(on_done),
 					),
 					self.embeddable_on.clone(),
@@ -25,7 +25,7 @@ use std::mem;
 use std::ptr;
 use sha3;
 use std::slice;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::io::{self, Read, Write};
 use std::fs::{self, File};
 
@@ -86,6 +86,7 @@ impl Node {
 pub type H256 = [u8; 32];
 
 pub struct Light {
+	cache_dir: PathBuf,
 	block_number: u64,
 	cache: Vec<Node>,
 	seed_compute: Mutex<SeedHashCompute>,
@@ -94,8 +95,8 @@ pub struct Light {
 /// Light cache structure
 impl Light {
 	/// Create a new light cache for a given block number
-	pub fn new(block_number: u64) -> Light {
-		light_new(block_number)
+	pub fn new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light {
+		light_new(cache_dir, block_number)
 	}
 
 	/// Calculate the light boundary data
@@ -105,17 +106,15 @@ impl Light {
 		light_compute(self, header_hash, nonce)
 	}
 
-	pub fn file_path(seed_hash: H256) -> PathBuf {
-		let mut home = ::std::env::home_dir().unwrap();
-		home.push(".ethash");
-		home.push("light");
-		home.push(to_hex(&seed_hash));
-		home
+	pub fn file_path<T: AsRef<Path>>(cache_dir: T, seed_hash: H256) -> PathBuf {
+		let mut cache_dir = cache_dir.as_ref().to_path_buf();
+		cache_dir.push(to_hex(&seed_hash));
+		cache_dir
 	}
 
-	pub fn from_file(block_number: u64) -> io::Result<Light> {
+	pub fn from_file<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> io::Result<Light> {
 		let seed_compute = SeedHashCompute::new();
-		let path = Light::file_path(seed_compute.get_seedhash(block_number));
+		let path = Light::file_path(&cache_dir, seed_compute.get_seedhash(block_number));
 		let mut file = File::open(path)?;
 
 		let cache_size = get_cache_size(block_number);
@@ -128,19 +127,22 @@ impl Light {
 		let buf = unsafe { slice::from_raw_parts_mut(nodes.as_mut_ptr() as *mut u8, cache_size) };
 		file.read_exact(buf)?;
 		Ok(Light {
+			block_number,
+			cache_dir: cache_dir.as_ref().to_path_buf(),
 			cache: nodes,
-			block_number: block_number,
 			seed_compute: Mutex::new(seed_compute),
 		})
 	}
 
 	pub fn to_file(&self) -> io::Result<PathBuf> {
 		let seed_compute = self.seed_compute.lock();
-		let path = Light::file_path(seed_compute.get_seedhash(self.block_number));
+		let path = Light::file_path(&self.cache_dir, seed_compute.get_seedhash(self.block_number));
 
 		if self.block_number >= ETHASH_EPOCH_LENGTH * 2 {
 			let deprecated = Light::file_path(
-				seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2));
+				&self.cache_dir,
+				seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2)
+			);
 
 			if deprecated.exists() {
 				debug!(target: "ethash", "removing: {:?}", &deprecated);
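Note: the ethash light cache now lives under a caller-supplied cache directory (one file per seed hash) instead of the hard-coded ~/.ethash path, and `Light` remembers that directory so `to_file` can prune the cache from two epochs ago. A minimal sketch of a migrated caller, assuming a temporary directory is an acceptable cache location:

	let light = Light::new(&::std::env::temp_dir(), 486382);
	let cache_file = light.to_file().expect("cache dir should be writable");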
@@ -341,14 +343,12 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
 	}
 }
 
-fn light_new(block_number: u64) -> Light {
+fn light_new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light {
 	let seed_compute = SeedHashCompute::new();
 	let seedhash = seed_compute.get_seedhash(block_number);
 	let cache_size = get_cache_size(block_number);
 
-	if cache_size % NODE_BYTES != 0 {
-		panic!("Unaligned cache size");
-	}
+	assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size");
 	let num_nodes = cache_size / NODE_BYTES;
 
 	let mut nodes = Vec::with_capacity(num_nodes);
@@ -372,8 +372,9 @@ fn light_new(block_number: u64) -> Light {
 	}
 
 	Light {
+		block_number,
+		cache_dir: cache_dir.as_ref().to_path_buf(),
 		cache: nodes,
-		block_number: block_number,
 		seed_compute: Mutex::new(seed_compute),
 	}
 }
@@ -432,7 +433,7 @@ fn test_light_compute() {
 	let boundary = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84];
 	let nonce = 0xd7b3ac70a301a249;
 	// difficulty = 0x085657254bd9u64;
-	let light = Light::new(486382);
+	let light = Light::new(&::std::env::temp_dir(), 486382);
 	let result = light_compute(&light, &hash, nonce);
 	assert_eq!(result.mix_hash[..], mix_hash[..]);
 	assert_eq!(result.value[..], boundary[..]);
@@ -471,15 +472,16 @@ fn test_seed_compute_after_newer() {
 
 #[test]
 fn test_drop_old_data() {
-	let first = Light::new(0).to_file().unwrap();
+	let path = ::std::env::temp_dir();
+	let first = Light::new(&path, 0).to_file().unwrap();
 
-	let second = Light::new(ETHASH_EPOCH_LENGTH).to_file().unwrap();
+	let second = Light::new(&path, ETHASH_EPOCH_LENGTH).to_file().unwrap();
 	assert!(fs::metadata(&first).is_ok());
 
-	let _ = Light::new(ETHASH_EPOCH_LENGTH * 2).to_file();
+	let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 2).to_file();
 	assert!(fs::metadata(&first).is_err());
 	assert!(fs::metadata(&second).is_ok());
 
-	let _ = Light::new(ETHASH_EPOCH_LENGTH * 3).to_file();
+	let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 3).to_file();
 	assert!(fs::metadata(&second).is_err());
 }
@@ -25,6 +25,7 @@ extern crate log;
 mod compute;
 
 use std::mem;
+use std::path::{Path, PathBuf};
 use compute::Light;
 pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty, slow_get_seedhash};
 
@@ -41,12 +42,14 @@ struct LightCache {
 /// Light/Full cache manager.
 pub struct EthashManager {
 	cache: Mutex<LightCache>,
+	cache_dir: PathBuf,
 }
 
 impl EthashManager {
 	/// Create a new new instance of ethash manager
-	pub fn new() -> EthashManager {
+	pub fn new<T: AsRef<Path>>(cache_dir: T) -> EthashManager {
 		EthashManager {
+			cache_dir: cache_dir.as_ref().to_path_buf(),
 			cache: Mutex::new(LightCache {
 				recent_epoch: None,
 				recent: None,
@@ -88,11 +91,11 @@ impl EthashManager {
 		};
 		match light {
 			None => {
-				let light = match Light::from_file(block_number) {
+				let light = match Light::from_file(&self.cache_dir, block_number) {
 					Ok(light) => Arc::new(light),
 					Err(e) => {
 						debug!("Light cache file not found for {}:{}", block_number, e);
-						let light = Light::new(block_number);
+						let light = Light::new(&self.cache_dir, block_number);
 						if let Err(e) = light.to_file() {
 							warn!("Light cache file write error: {}", e);
 						}
@@ -112,7 +115,7 @@ impl EthashManager {
 
 #[test]
 fn test_lru() {
-	let ethash = EthashManager::new();
+	let ethash = EthashManager::new(&::std::env::temp_dir());
 	let hash = [0u8; 32];
 	ethash.compute_light(1, &hash, 1);
 	ethash.compute_light(50000, &hash, 1);
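Note: `EthashManager::new` now takes the cache directory once and threads it into `Light::from_file` / `Light::new`, so the embedder decides where DAG light caches are stored. A sketch mirroring the updated `test_lru` test:

	let ethash = EthashManager::new(&::std::env::temp_dir());
	let _ = ethash.compute_light(1, &[0u8; 32], 1);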
@@ -26,7 +26,7 @@ use ethcore::receipt::Receipt;
 
 use stats::Corpus;
 use time::{SteadyTime, Duration};
-use util::{U256, H256};
+use util::{U256, H256, HeapSizeOf};
 use util::cache::MemoryLruCache;
 
 /// Configuration for how much data to cache.
@@ -153,6 +153,22 @@ impl Cache {
 	pub fn set_gas_price_corpus(&mut self, corpus: Corpus<U256>) {
 		self.corpus = Some((corpus, SteadyTime::now()))
 	}
+
+	/// Get the memory used.
+	pub fn mem_used(&self) -> usize {
+		self.heap_size_of_children()
+	}
+}
+
+impl HeapSizeOf for Cache {
+	fn heap_size_of_children(&self) -> usize {
+		self.headers.current_size()
+			+ self.canon_hashes.current_size()
+			+ self.bodies.current_size()
+			+ self.receipts.current_size()
+			+ self.chain_score.current_size()
+			// TODO: + corpus
+	}
 }
 
 #[cfg(test)]
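Note: the light-client cache now exposes its memory footprint by summing the `current_size()` of each LRU map (the gas-price corpus is still excluded, per the TODO). A sketch of reading it, assuming `cache` is a `Cache` value from this module:

	let bytes = cache.mem_used(); // delegates to heap_size_of_children()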
@@ -44,7 +44,7 @@ mod header_chain;
 mod service;
 
 /// Configuration for the light client.
-#[derive(Debug, Default, Clone)]
+#[derive(Debug, Clone)]
 pub struct Config {
 	/// Verification queue config.
 	pub queue: queue::Config,
@@ -56,6 +56,21 @@ pub struct Config {
 	pub db_compaction: CompactionProfile,
 	/// Should db have WAL enabled?
 	pub db_wal: bool,
+	/// Should it do full verification of blocks?
+	pub verify_full: bool,
+}
+
+impl Default for Config {
+	fn default() -> Config {
+		Config {
+			queue: Default::default(),
+			chain_column: None,
+			db_cache_size: None,
+			db_compaction: CompactionProfile::default(),
+			db_wal: true,
+			verify_full: true,
+		}
+	}
 }
 
 /// Trait for interacting with the header chain abstractly.
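Note: `Config` drops `#[derive(Default)]` so that `verify_full` can default to `true` while the other fields keep their previous defaults. A caller that wants the old lighter behaviour can override just that flag with ordinary struct-update syntax; a minimal sketch:

	let config = Config { verify_full: false, ..Default::default() };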
@@ -109,6 +124,9 @@ pub trait LightChainClient: Send + Sync {
 
 	/// Get the EIP-86 transition block number.
 	fn eip86_transition(&self) -> u64;
+
+	/// Get a report of import activity since the last call.
+	fn report(&self) -> ClientReport;
 }
 
 /// An actor listening to light chain events.
@@ -141,6 +159,7 @@ pub struct Client {
 	import_lock: Mutex<()>,
 	db: Arc<KeyValueDB>,
 	listeners: RwLock<Vec<Weak<LightChainNotify>>>,
+	verify_full: bool,
 }
 
 impl Client {
@@ -156,6 +175,7 @@ impl Client {
 			import_lock: Mutex::new(()),
 			db: db,
 			listeners: RwLock::new(vec![]),
+			verify_full: config.verify_full,
 		})
 	}
 
@@ -263,6 +283,14 @@ impl Client {
 		for verified_header in self.queue.drain(MAX) {
 			let (num, hash) = (verified_header.number(), verified_header.hash());
 
+			if self.verify_full && !self.check_header(&mut bad, &verified_header) {
+				continue
+			}
+
+			// TODO: `epoch_end_signal`, `is_epoch_end`.
+			// proofs we get from the network would be _complete_, whereas we need
+			// _incomplete_ signals
+
 			let mut tx = self.db.transaction();
 			let pending = match self.chain.insert(&mut tx, verified_header) {
 				Ok(pending) => {
@@ -273,14 +301,16 @@ impl Client {
 				Err(e) => {
 					debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e);
 					bad.push(hash);
-					break;
+					continue;
 				}
 			};
 
 			self.db.write_buffered(tx);
 			self.chain.apply_pending(pending);
-			if let Err(e) = self.db.flush() {
-				panic!("Database flush failed: {}. Check disk health and space.", e);
-			}
+		}
+		if let Err(e) = self.db.flush() {
+			panic!("Database flush failed: {}. Check disk health and space.", e);
 		}
 
 		self.queue.mark_as_bad(&bad);
@@ -291,7 +321,7 @@ impl Client {
 
 	/// Get a report about blocks imported.
 	pub fn report(&self) -> ClientReport {
-		::std::mem::replace(&mut *self.report.write(), ClientReport::default())
+		self.report.read().clone()
 	}
 
 	/// Get blockchain mem usage in bytes.
@@ -350,6 +380,37 @@ impl Client {
 			}
 		}
 	}
+
+	// return true if should skip, false otherwise. may push onto bad if
+	// should skip.
+	fn check_header(&self, bad: &mut Vec<H256>, verified_header: &Header) -> bool {
+		let hash = verified_header.hash();
+		let parent_header = match self.chain.block_header(BlockId::Hash(*verified_header.parent_hash())) {
+			Some(header) => header,
+			None => return false, // skip import of block with missing parent.
+		};
+
+		// Verify Block Family
+		let verify_family_result = self.engine.verify_block_family(&verified_header, &parent_header.decode(), None);
+		if let Err(e) = verify_family_result {
+			warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}",
+				verified_header.number(), verified_header.hash(), e);
+			bad.push(hash);
+			return false;
+		};
+
+		// "external" verification.
+		let verify_external_result = self.engine.verify_block_external(&verified_header, None);
+		if let Err(e) = verify_external_result {
+			warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}",
+				verified_header.number(), verified_header.hash(), e);

+			bad.push(hash);
+			return false;
+		};
+
+		true
+	}
 }
 
 impl LightChainClient for Client {
@@ -414,4 +475,8 @@ impl LightChainClient for Client {
 	fn eip86_transition(&self) -> u64 {
 		self.engine().params().eip86_transition
 	}
+
+	fn report(&self) -> ClientReport {
+		Client::report(self)
+	}
 }
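Note: header import in the light client now optionally runs stage 3/4 verification (`verify_block_family` / `verify_block_external`) via `check_header` before insertion, skips a failing header with `continue` instead of breaking out of the drain, and flushes the database once after the loop. Import activity is also exposed through the trait; a sketch of polling it, assuming `client` implements `LightChainClient`:

	let report = client.report(); // snapshot of import activity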
@@ -54,10 +54,10 @@
 		"0x00521965e7bd230323c423d96c657db5b79d099f": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }
 	},
 	"nodes": [
-		"enode://c005dd308256c60fab247813d8bf6d6e81f9cd354287837eb1c2fcf294adaa913a3208e88900ef5c55a8cba7042c301d80503edec2ad3f92a72e241ee6743854@192.241.230.87:30303",
-		"enode://48caeceb2724f2f71406990aa81efe87f8c53f26441d891473da2ae50cc138f238addc0e46b5aee240db55de8c711daac53d7b32a3f13e30edb86a3ca7c2700b@138.68.143.220:30303",
-		"enode://85705212fd28ebdd56669fb55e958feb9d81f74fe76c82f867564b6c2995e69f596df0f588eba16f1a43b69ce06485d68231a0c83fed8469b41eba0e390c126f@139.59.146.42:30303",
-		"enode://2aa81bd0a761cd4f02c934dcf3f81c5b65953e51ab5ba03ceb1f125eb06418a1cdffb1c9d01871aa7bd456f3fce35e745608189ad1164f72b2161634b0c3f6ea@188.166.240.190:30303",
-		"enode://c5900cdd6d20795d58372f42dfbab9d664c27bb97e9c27972741942736e919122f9bac28e74cbc58e4ff195475ea90d9880b71a37af5b5a8cb41d843f765cff8@174.138.79.48:30303"
+		"enode://0518a3d35d4a7b3e8c433e7ffd2355d84a1304ceb5ef349787b556197f0c87fad09daed760635b97d52179d645d3e6d16a37d2cc0a9945c2ddf585684beb39ac@40.68.248.100:30303",
+		"enode://dcf984764db421fa0cd8dc7fc02ae378545723abb94d179f55325514cc30185eaea3dcefde6e358b7cdbe970c50b7c49e841618713a9a72d6f3f59ad9949ec6b@52.165.239.18:30303",
+		"enode://7e2e7f00784f516939f94e22bdc6cf96153603ca2b5df1c7cc0f90a38e7a2f218ffb1c05b156835e8b49086d11fdd1b3e2965be16baa55204167aa9bf536a4d9@52.243.47.56:30303",
+		"enode://d51b3e98bf35addf2f1d0ea1ffc90483e24d7c60b0fb3be1701e818f3d6778c06e53fdec737a534fe222956296f9d6e909baa025916a94601897e5c7136a7d95@40.71.221.215:30303",
+		"enode://419d42e300e8fd379ff6d045d93d7e66a091441e7b3c9f1d3d10088d8634ad37721e6bf86148f78c3f1b9f1360dc566ca8ee830b2d2079bc9f7171ea6152bb64@52.166.117.77:30303"
 	]
 }
@@ -112,6 +112,22 @@ impl ClientReport {
 	}
 }
 
+impl<'a> ::std::ops::Sub<&'a ClientReport> for ClientReport {
+	type Output = Self;
+
+	fn sub(mut self, other: &'a ClientReport) -> Self {
+		let higher_mem = ::std::cmp::max(self.state_db_mem, other.state_db_mem);
+		let lower_mem = ::std::cmp::min(self.state_db_mem, other.state_db_mem);
+
+		self.blocks_imported -= other.blocks_imported;
+		self.transactions_applied -= other.transactions_applied;
+		self.gas_processed = self.gas_processed - other.gas_processed;
+		self.state_db_mem = higher_mem - lower_mem;
+
+		self
+	}
+}
+
 struct SleepState {
 	last_activity: Option<Instant>,
 	last_autosleep: Option<Instant>,
@@ -1702,6 +1718,33 @@ impl MiningBlockChainClient for Client {
 		open_block
 	}
 
+	fn reopen_block(&self, block: ClosedBlock) -> OpenBlock {
+		let engine = &*self.engine;
+		let mut block = block.reopen(engine);
+		let max_uncles = engine.maximum_uncle_count();
+		if block.uncles().len() < max_uncles {
+			let chain = self.chain.read();
+			let h = chain.best_block_hash();
+			// Add new uncles
+			let uncles = chain
+				.find_uncle_hashes(&h, engine.maximum_uncle_age())
+				.unwrap_or_else(Vec::new);
+
+			for h in uncles {
+				if !block.uncles().iter().any(|header| header.hash() == h) {
+					let uncle = chain.block_header(&h).expect("find_uncle_hashes only returns hashes for existing headers; qed");
+					block.push_uncle(uncle).expect("pushing up to maximum_uncle_count;
+						push_uncle is not ok only if more than maximum_uncle_count is pushed;
+						so all push_uncle are Ok;
+						qed");
+					if block.uncles().len() >= max_uncles { break }
+				}
+			}
+
+		}
+		block
+	}
+
 	fn vm_factory(&self) -> &EvmFactory {
 		&self.factories.vm
 	}
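Note: `ClientReport` now supports subtraction against a borrowed report, which lets periodic callers compute activity deltas between two snapshots (state-DB memory is reported as the absolute difference). A minimal sketch, assuming `earlier` and `later` are two snapshots taken some time apart:

	let delta = later - &earlier;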
@@ -44,7 +44,7 @@ use types::mode::Mode;
 use types::pruning_info::PruningInfo;
 
 use verification::queue::QueueInfo;
-use block::{OpenBlock, SealedBlock};
+use block::{OpenBlock, SealedBlock, ClosedBlock};
 use executive::Executed;
 use error::CallError;
 use trace::LocalizedTrace;
@@ -381,6 +381,10 @@ impl MiningBlockChainClient for TestBlockChainClient {
 		open_block
 	}
 
+	fn reopen_block(&self, block: ClosedBlock) -> OpenBlock {
+		block.reopen(&*self.spec.engine)
+	}
+
 	fn vm_factory(&self) -> &EvmFactory {
 		&self.vm_factory
 	}
@@ -19,7 +19,7 @@ use util::{U256, Address, H256, H2048, Bytes, Itertools};
 use util::hashdb::DBValue;
 use blockchain::TreeRoute;
 use verification::queue::QueueInfo as BlockQueueInfo;
-use block::{OpenBlock, SealedBlock};
+use block::{OpenBlock, SealedBlock, ClosedBlock};
 use header::{BlockNumber};
 use transaction::{LocalizedTransaction, PendingTransaction, SignedTransaction};
 use transaction_import::TransactionImportResult;
@@ -288,6 +288,9 @@ pub trait MiningBlockChainClient: BlockChainClient {
 		extra_data: Bytes
 	) -> OpenBlock;
 
+	/// Reopens an OpenBlock and updates uncles.
+	fn reopen_block(&self, block: ClosedBlock) -> OpenBlock;
+
 	/// Returns EvmFactory.
 	fn vm_factory(&self) -> &EvmFactory;
 
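Note: `reopen_block` is the counterpart to `prepare_open_block`: it turns a `ClosedBlock` back into an `OpenBlock` and, in the full client implementation earlier in this commit, tops the uncle list up to `maximum_uncle_count` before sealing again. A sketch of the intended call site, assuming `client` implements `MiningBlockChainClient` and `closed` is a previously closed block:

	let reopened = client.reopen_block(closed); // uncles refreshed before resealing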
@@ -266,7 +266,7 @@ mod tests {
 	/// Create a new test chain spec with `BasicAuthority` consensus engine.
 	fn new_test_authority() -> Spec {
 		let bytes: &[u8] = include_bytes!("../../res/basic_authority.json");
-		Spec::load(bytes).expect("invalid chain spec")
+		Spec::load(::std::env::temp_dir(), bytes).expect("invalid chain spec")
 	}
 
 	#[test]
@@ -386,7 +386,6 @@ pub trait Engine : Sync + Send {
 	}
 }
 
-
 /// Common engine utilities
 pub mod common {
 	use block::ExecutedBlock;
@@ -129,6 +129,8 @@ pub enum BlockError {
 	UncleIsBrother(OutOfBounds<BlockNumber>),
 	/// An uncle is already in the chain.
 	UncleInChain(H256),
+	/// An uncle is included twice.
+	DuplicateUncle(H256),
 	/// An uncle has a parent not in the chain.
 	UncleParentNotInChain(H256),
 	/// State root header field is invalid.
@@ -188,6 +190,7 @@ impl fmt::Display for BlockError {
 			UncleTooOld(ref oob) => format!("Uncle block is too old. {}", oob),
 			UncleIsBrother(ref oob) => format!("Uncle from same generation as block. {}", oob),
 			UncleInChain(ref hash) => format!("Uncle {} already in chain", hash),
+			DuplicateUncle(ref hash) => format!("Uncle {} already in the header", hash),
 			UncleParentNotInChain(ref hash) => format!("Uncle {} has a parent not in the chain", hash),
 			InvalidStateRoot(ref mis) => format!("Invalid state root in header: {}", mis),
 			InvalidGasUsed(ref mis) => format!("Invalid gas used in header: {}", mis),
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
+use std::path::Path;
 use ethash::{quick_get_difficulty, slow_get_seedhash, EthashManager};
 use util::*;
 use block::*;
@@ -24,7 +25,7 @@ use header::{Header, BlockNumber};
 use state::CleanupMode;
 use spec::CommonParams;
 use transaction::UnverifiedTransaction;
-use engines::Engine;
+use engines::{self, Engine};
 use evm::Schedule;
 use ethjson;
 use rlp::{self, UntrustedRlp};
@@ -147,12 +148,17 @@ pub struct Ethash {
 
 impl Ethash {
 	/// Create a new instance of Ethash engine
-	pub fn new(params: CommonParams, ethash_params: EthashParams, builtins: BTreeMap<Address, Builtin>) -> Arc<Self> {
+	pub fn new<T: AsRef<Path>>(
+		cache_dir: T,
+		params: CommonParams,
+		ethash_params: EthashParams,
+		builtins: BTreeMap<Address, Builtin>,
+	) -> Arc<Self> {
 		Arc::new(Ethash {
-			params: params,
-			ethash_params: ethash_params,
-			builtins: builtins,
-			pow: EthashManager::new(),
+			params,
+			ethash_params,
+			builtins,
+			pow: EthashManager::new(cache_dir),
 		})
 	}
 }
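Note: `Ethash::new` now takes the cache directory first and hands it to `EthashManager::new`. A minimal sketch of the updated constructor call, where `spec` and `ethash_params` are built as in the tests below and the temp dir is assumed to be an acceptable cache location:

	let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethash_params, BTreeMap::new());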
@@ -165,7 +171,7 @@ impl Ethash {
 // for any block in the chain.
 // in the future, we might move the Ethash epoch
 // caching onto this mechanism as well.
-impl ::engines::EpochVerifier for Arc<Ethash> {
+impl engines::EpochVerifier for Arc<Ethash> {
 	fn verify_light(&self, _header: &Header) -> Result<(), Error> { Ok(()) }
 	fn verify_heavy(&self, header: &Header) -> Result<(), Error> {
 		self.verify_block_unordered(header, None)
@@ -262,7 +268,7 @@ impl Engine for Arc<Ethash> {
 		_begins_epoch: bool,
 	) -> Result<(), Error> {
 		let parent_hash = block.fields().header.parent_hash().clone();
-		::engines::common::push_last_hash(block, last_hashes, self, &parent_hash)?;
+		engines::common::push_last_hash(block, last_hashes, self, &parent_hash)?;
 		if block.fields().header.number() == self.ethash_params.dao_hardfork_transition {
 			let state = block.fields_mut().state;
 			for child in &self.ethash_params.dao_hardfork_accounts {
@@ -404,8 +410,8 @@ impl Engine for Arc<Ethash> {
 		Ok(())
 	}
 
-	fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> ::engines::ConstructedVerifier<'a> {
-		::engines::ConstructedVerifier::Trusted(Box::new(self.clone()))
+	fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> engines::ConstructedVerifier<'a> {
+		engines::ConstructedVerifier::Trusted(Box::new(self.clone()))
 	}
 
 	fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
@@ -558,13 +564,18 @@ mod tests {
 	use engines::Engine;
 	use error::{BlockError, Error};
 	use header::Header;
+	use spec::Spec;
 	use super::super::{new_morden, new_homestead_test};
 	use super::{Ethash, EthashParams, PARITY_GAS_LIMIT_DETERMINANT, ecip1017_eras_block_reward};
 	use rlp;
 
+	fn test_spec() -> Spec {
+		new_morden(&::std::env::temp_dir())
+	}
+
 	#[test]
 	fn on_close_block() {
-		let spec = new_morden();
+		let spec = test_spec();
 		let engine = &*spec.engine;
 		let genesis_header = spec.genesis_header();
 		let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
@@ -576,7 +587,7 @@ mod tests {
 
 	#[test]
 	fn on_close_block_with_uncle() {
-		let spec = new_morden();
+		let spec = test_spec();
 		let engine = &*spec.engine;
 		let genesis_header = spec.genesis_header();
 		let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
@@ -594,14 +605,14 @@ mod tests {
 
 	#[test]
 	fn has_valid_metadata() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		assert!(!engine.name().is_empty());
 		assert!(engine.version().major >= 1);
 	}
 
 	#[test]
 	fn can_return_schedule() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let schedule = engine.schedule(10000000);
 		assert!(schedule.stack_limit > 0);
 
@@ -611,8 +622,8 @@ mod tests {
 
 	#[test]
 	fn can_do_seal_verification_fail() {
-		let engine = new_morden().engine;
-		//let engine = Ethash::new_test(new_morden());
+		let engine = test_spec().engine;
+		//let engine = Ethash::new_test(test_spec());
 		let header: Header = Header::default();
 
 		let verify_result = engine.verify_block_basic(&header, None);
@@ -626,7 +637,7 @@ mod tests {
 
 	#[test]
 	fn can_do_difficulty_verification_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let mut header: Header = Header::default();
 		header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
 
@@ -641,7 +652,7 @@ mod tests {
 
 	#[test]
 	fn can_do_proof_of_work_verification_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let mut header: Header = Header::default();
 		header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
 		header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap());
@@ -657,7 +668,7 @@ mod tests {
 
 	#[test]
 	fn can_do_seal_unordered_verification_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let header: Header = Header::default();
 
 		let verify_result = engine.verify_block_unordered(&header, None);
@@ -671,7 +682,7 @@ mod tests {
 
 	#[test]
 	fn can_do_seal256_verification_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let mut header: Header = Header::default();
 		header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
 		let verify_result = engine.verify_block_unordered(&header, None);
@@ -685,7 +696,7 @@ mod tests {
 
 	#[test]
 	fn can_do_proof_of_work_unordered_verification_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let mut header: Header = Header::default();
 		header.set_seal(vec![rlp::encode(&H256::from("b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d")).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
 		header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap());
@@ -701,7 +712,7 @@ mod tests {
 
 	#[test]
 	fn can_verify_block_family_genesis_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let header: Header = Header::default();
 		let parent_header: Header = Header::default();
 
@@ -716,7 +727,7 @@ mod tests {
 
 	#[test]
 	fn can_verify_block_family_difficulty_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let mut header: Header = Header::default();
 		header.set_number(2);
 		let mut parent_header: Header = Header::default();
@@ -733,7 +744,7 @@ mod tests {
 
 	#[test]
 	fn can_verify_block_family_gas_fail() {
-		let engine = new_morden().engine;
+		let engine = test_spec().engine;
 		let mut header: Header = Header::default();
 		header.set_number(2);
 		header.set_difficulty(U256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap());
@@ -763,7 +774,7 @@ mod tests {
 	fn difficulty_frontier() {
 		let spec = new_homestead_test();
 		let ethparams = get_default_ethash_params();
-		let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 
 		let mut parent_header = Header::default();
 		parent_header.set_number(1000000);
@@ -781,7 +792,7 @@ mod tests {
 	fn difficulty_homestead() {
 		let spec = new_homestead_test();
 		let ethparams = get_default_ethash_params();
-		let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 
 		let mut parent_header = Header::default();
 		parent_header.set_number(1500000);
@@ -838,7 +849,7 @@ mod tests {
 			ecip1010_pause_transition: 3000000,
 			..get_default_ethash_params()
 		};
-		let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 
 		let mut parent_header = Header::default();
 		parent_header.set_number(3500000);
@@ -872,7 +883,7 @@ mod tests {
 			ecip1010_continue_transition: 5000000,
 			..get_default_ethash_params()
 		};
-		let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 
 		let mut parent_header = Header::default();
 		parent_header.set_number(5000102);
@@ -917,7 +928,8 @@ mod tests {
 	#[test]
 	fn gas_limit_is_multiple_of_determinant() {
 		let spec = new_homestead_test();
-		let ethash = Ethash::new(spec.params().clone(), get_default_ethash_params(), BTreeMap::new());
+		let ethparams = get_default_ethash_params();
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 		let mut parent = Header::new();
 		let mut header = Header::new();
 		header.set_number(1);
@@ -961,7 +973,7 @@ mod tests {
 	fn difficulty_max_timestamp() {
 		let spec = new_homestead_test();
 		let ethparams = get_default_ethash_params();
-		let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 
 		let mut parent_header = Header::default();
 		parent_header.set_number(1000000);
@@ -989,7 +1001,7 @@ mod tests {
 		header.set_number(parent_header.number() + 1);
 		header.set_gas_limit(100_001.into());
 		header.set_difficulty(ethparams.minimum_difficulty);
-		let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 		assert!(ethash.verify_block_family(&header, &parent_header, None).is_ok());
 
 		parent_header.set_number(9);
@@ -1044,7 +1056,7 @@ mod tests {
 			nonce: U256::zero(),
 		}.sign(keypair.secret(), None).into();
 
-		let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
+		let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
 		assert!(ethash.verify_transaction_basic(&tx1, &header).is_ok());
 		assert!(ethash.verify_transaction_basic(&tx2, &header).is_ok());
 
@@ -27,6 +27,7 @@ pub mod denominations;
 pub use self::ethash::{Ethash};
 pub use self::denominations::*;
 
+use std::path::Path;
 use super::spec::*;
 
 /// Most recent fork block that we support on Mainnet.
@@ -38,51 +39,56 @@ pub const FORK_SUPPORTED_ROPSTEN: u64 = 10;
 /// Most recent fork block that we support on Kovan.
 pub const FORK_SUPPORTED_KOVAN: u64 = 0;
 
-fn load(b: &[u8]) -> Spec {
-	Spec::load(b).expect("chain spec is invalid")
+fn load<'a, T: 'a + Into<Option<&'a Path>>>(cache_dir: T, b: &[u8]) -> Spec {
+	match cache_dir.into() {
+		Some(path) => Spec::load(path, b),
+		None => Spec::load(&::std::env::temp_dir(), b)
+	}.expect("chain spec is invalid")
 }
 
 /// Create a new Foundation Olympic chain spec.
-pub fn new_olympic() -> Spec { load(include_bytes!("../../res/ethereum/olympic.json")) }
+pub fn new_olympic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/olympic.json")) }
 
 /// Create a new Foundation Mainnet chain spec.
-pub fn new_foundation() -> Spec { load(include_bytes!("../../res/ethereum/foundation.json")) }
+pub fn new_foundation(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/foundation.json")) }
 
 /// Create a new Classic Mainnet chain spec without the DAO hardfork.
-pub fn new_classic() -> Spec { load(include_bytes!("../../res/ethereum/classic.json")) }
+pub fn new_classic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/classic.json")) }
 
 /// Create a new Expanse mainnet chain spec.
-pub fn new_expanse() -> Spec { load(include_bytes!("../../res/ethereum/expanse.json")) }
+pub fn new_expanse(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/expanse.json")) }
 
 /// Create a new Kovan testnet chain spec.
-pub fn new_kovan() -> Spec { load(include_bytes!("../../res/ethereum/kovan.json")) }
-
-/// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead.
-pub fn new_frontier_test() -> Spec { load(include_bytes!("../../res/ethereum/frontier_test.json")) }
-
-/// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier.
-pub fn new_homestead_test() -> Spec { load(include_bytes!("../../res/ethereum/homestead_test.json")) }
-
-/// Create a new Foundation Homestead-EIP150-era chain spec as though it never changed from Homestead/Frontier.
-pub fn new_eip150_test() -> Spec { load(include_bytes!("../../res/ethereum/eip150_test.json")) }
-
-/// Create a new Foundation Homestead-EIP161-era chain spec as though it never changed from Homestead/Frontier.
-pub fn new_eip161_test() -> Spec { load(include_bytes!("../../res/ethereum/eip161_test.json")) }
-
-/// Create a new Foundation Frontier/Homestead/DAO chain spec with transition points at #5 and #8.
-pub fn new_transition_test() -> Spec { load(include_bytes!("../../res/ethereum/transition_test.json")) }
-
-/// Create a new Foundation Mainnet chain spec without genesis accounts.
-pub fn new_mainnet_like() -> Spec { load(include_bytes!("../../res/ethereum/frontier_like_test.json")) }
-
-/// Create a new Foundation Metropolis era spec.
-pub fn new_metropolis_test() -> Spec { load(include_bytes!("../../res/ethereum/metropolis_test.json")) }
+pub fn new_kovan(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/kovan.json")) }
 
 /// Create a new Foundation Ropsten chain spec.
-pub fn new_ropsten() -> Spec { load(include_bytes!("../../res/ethereum/ropsten.json")) }
+pub fn new_ropsten(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/ropsten.json")) }
 
 /// Create a new Morden chain spec.
-pub fn new_morden() -> Spec { load(include_bytes!("../../res/ethereum/morden.json")) }
+pub fn new_morden(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/morden.json")) }
+
+// For tests
+
+/// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead.
+pub fn new_frontier_test() -> Spec { load(None, include_bytes!("../../res/ethereum/frontier_test.json")) }
+
+/// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier.
+pub fn new_homestead_test() -> Spec { load(None, include_bytes!("../../res/ethereum/homestead_test.json")) }
+
+/// Create a new Foundation Homestead-EIP150-era chain spec as though it never changed from Homestead/Frontier.
+pub fn new_eip150_test() -> Spec { load(None, include_bytes!("../../res/ethereum/eip150_test.json")) }
+
+/// Create a new Foundation Homestead-EIP161-era chain spec as though it never changed from Homestead/Frontier.
+pub fn new_eip161_test() -> Spec { load(None, include_bytes!("../../res/ethereum/eip161_test.json")) }
+
+/// Create a new Foundation Frontier/Homestead/DAO chain spec with transition points at #5 and #8.
+pub fn new_transition_test() -> Spec { load(None, include_bytes!("../../res/ethereum/transition_test.json")) }
+
+/// Create a new Foundation Mainnet chain spec without genesis accounts.
+pub fn new_mainnet_like() -> Spec { load(None, include_bytes!("../../res/ethereum/frontier_like_test.json")) }
+
+/// Create a new Foundation Metropolis era spec.
+pub fn new_metropolis_test() -> Spec { load(None, include_bytes!("../../res/ethereum/metropolis_test.json")) }
 
 #[cfg(test)]
 mod tests {
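Note: the public spec constructors now take the ethash cache directory, while the test-only constructors pass `None` so `load` falls back to a temporary directory. A minimal sketch of the migrated call, assuming the embedder picks the directory:

	let spec = new_foundation(&::std::env::temp_dir());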
@@ -94,7 +100,7 @@ mod tests {
 
 	#[test]
 	fn ensure_db_good() {
-		let spec = new_morden();
+		let spec = new_morden(&::std::env::temp_dir());
 		let engine = &spec.engine;
 		let genesis_header = spec.genesis_header();
 		let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
@@ -109,7 +115,7 @@ mod tests {
 
 	#[test]
 	fn morden() {
-		let morden = new_morden();
+		let morden = new_morden(&::std::env::temp_dir());
 
 		assert_eq!(morden.state_root(), "f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9".into());
 		let genesis = morden.genesis_block();
@@ -120,7 +126,7 @@ mod tests {
 
 	#[test]
 	fn frontier() {
-		let frontier = new_foundation();
+		let frontier = new_foundation(&::std::env::temp_dir());
 
 		assert_eq!(frontier.state_root(), "d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544".into());
 		let genesis = frontier.genesis_block();
@@ -128,4 +134,23 @@ mod tests {
 
 		let _ = frontier.engine;
 	}
+
+	#[test]
+	fn all_spec_files_valid() {
+		let tmp = ::std::env::temp_dir();
+		new_olympic(&tmp);
+		new_foundation(&tmp);
+		new_classic(&tmp);
+		new_expanse(&tmp);
+		new_kovan(&tmp);
+		new_ropsten(&tmp);
+		new_morden(&tmp);
+		new_frontier_test();
+		new_homestead_test();
+		new_eip150_test();
+		new_eip161_test();
+		new_transition_test();
+		new_mainnet_like();
+		new_metropolis_test();
+	}
 }
@@ -129,8 +129,11 @@ pub trait Ext {
	/// Increments sstore refunds count by 1.
	fn inc_sstore_clears(&mut self);

+	/// Decide if any more operations should be traced. Passthrough for the VM trace.
+	fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool { false }
+
	/// Prepare to trace an operation. Passthrough for the VM trace.
-	fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: &U256) -> bool { false }
+	fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256) {}

	/// Trace the finalised execution of a single instruction.
	fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem_diff: Option<(usize, &[u8])>, _store_diff: Option<(U256, U256)>) {}
@@ -111,6 +111,7 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
		self.mem.clear();

		let mut informant = informant::EvmInformant::new(ext.depth());
+		let mut do_trace = true;

		let code = &params.code.as_ref().expect("exec always called with code; qed");
		let mut valid_jump_destinations = None;
@@ -124,13 +125,17 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
			let instruction = code[reader.position];
			reader.position += 1;

+			// TODO: make compile-time removable if too much of a performance hit.
+			do_trace = do_trace && ext.trace_next_instruction(reader.position - 1, instruction);
+
			let info = &infos[instruction as usize];
			self.verify_instruction(ext, instruction, info, &stack)?;

			// Calculate gas cost
			let requirements = gasometer.requirements(ext, instruction, info, &stack, self.mem.size())?;
-			// TODO: make compile-time removable if too much of a performance hit.
-			let trace_executed = ext.trace_prepare_execute(reader.position - 1, instruction, &requirements.gas_cost.as_u256());
+			if do_trace {
+				ext.trace_prepare_execute(reader.position - 1, instruction, requirements.gas_cost.as_u256());
+			}

			gasometer.verify_gas(&requirements.gas_cost)?;
			self.mem.expand(requirements.memory_required_size);
@@ -139,7 +144,7 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {

			evm_debug!({ informant.before_instruction(reader.position, instruction, info, &gasometer.current_gas, &stack) });

-			let (mem_written, store_written) = match trace_executed {
+			let (mem_written, store_written) = match do_trace {
				true => (Self::mem_written(instruction, &stack), Self::store_written(instruction, &stack)),
				false => (None, None),
			};
@@ -155,7 +160,7 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
				gasometer.current_gas = gasometer.current_gas + *gas;
			}

-			if trace_executed {
+			if do_trace {
				ext.trace_executed(gasometer.current_gas.as_u256(), stack.peek_top(info.ret), mem_written.map(|(o, s)| (o, &(self.mem[o..(o + s)]))), store_written);
			}

@@ -376,7 +376,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
			self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::Return(output, trace_output.as_mut()), &mut subtracer, &mut subvmtracer)
		};

-		vm_tracer.done_subtrace(subvmtracer, res.is_ok());
+		vm_tracer.done_subtrace(subvmtracer);

		trace!(target: "executive", "res={:?}", res);

@@ -457,7 +457,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
			self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(trace_output.as_mut()), &mut subtracer, &mut subvmtracer)
		};

-		vm_tracer.done_subtrace(subvmtracer, res.is_ok());
+		vm_tracer.done_subtrace(subvmtracer);

		match res {
			Ok(ref res) => tracer.trace_create(
@@ -379,7 +379,11 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E>
		self.substate.sstore_clears_count = self.substate.sstore_clears_count + U256::one();
	}

-	fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: &U256) -> bool {
+	fn trace_next_instruction(&mut self, pc: usize, instruction: u8) -> bool {
+		self.vm_tracer.trace_next_instruction(pc, instruction)
+	}
+
+	fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: U256) {
		self.vm_tracer.trace_prepare_execute(pc, instruction, gas_cost)
	}

@@ -88,6 +88,8 @@ pub struct MinerOptions {
	pub reseal_on_external_tx: bool,
	/// Reseal on receipt of new local transactions.
	pub reseal_on_own_tx: bool,
+	/// Reseal when new uncle block has been imported.
+	pub reseal_on_uncle: bool,
	/// Minimum period between transaction-inspired reseals.
	pub reseal_min_period: Duration,
	/// Maximum period between blocks (enables force sealing after that).
@@ -119,6 +121,7 @@ impl Default for MinerOptions {
			force_sealing: false,
			reseal_on_external_tx: false,
			reseal_on_own_tx: true,
+			reseal_on_uncle: false,
			tx_gas_limit: !U256::zero(),
			tx_queue_size: 1024,
			tx_queue_gas_limit: GasLimit::Auto,
@@ -347,7 +350,7 @@ impl Miner {
			Some(old_block) => {
				trace!(target: "miner", "prepare_block: Already have previous work; updating and returning");
				// add transactions to old_block
-				old_block.reopen(&*self.engine)
+				chain.reopen_block(old_block)
			}
			None => {
				// block not found - create it.
@@ -366,7 +369,6 @@ impl Miner {
		let mut transactions_to_penalize = HashSet::new();
		let block_number = open_block.block().fields().header.number();

-		// TODO Push new uncles too.
		let mut tx_count: usize = 0;
		let tx_total = transactions.len();
		for tx in transactions {
@@ -1153,11 +1155,10 @@ impl MinerService for Miner {
		})
	}

-	fn chain_new_blocks(&self, chain: &MiningBlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
+	fn chain_new_blocks(&self, chain: &MiningBlockChainClient, imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
		trace!(target: "miner", "chain_new_blocks");

-		// 1. We ignore blocks that were `imported` (because it means that they are not in canon-chain, and transactions
-		// should be still available in the queue.
+		// 1. We ignore blocks that were `imported` unless resealing on new uncles is enabled.
		// 2. We ignore blocks that are `invalid` because it doesn't have any meaning in terms of the transactions that
		// are in those blocks

@@ -1192,7 +1193,7 @@ impl MinerService for Miner {
			transaction_queue.remove_old(&fetch_account, time);
		}

-		if enacted.len() > 0 {
+		if enacted.len() > 0 || (imported.len() > 0 && self.options.reseal_on_uncle) {
			// --------------------------------------------------------------------------
			// | NOTE Code below requires transaction_queue and sealing_work locks.     |
			// | Make sure to release the locks before calling that method.             |
@@ -1312,6 +1313,7 @@ mod tests {
			force_sealing: false,
			reseal_on_external_tx: false,
			reseal_on_own_tx: true,
+			reseal_on_uncle: false,
			reseal_min_period: Duration::from_secs(5),
			reseal_max_period: Duration::from_secs(120),
			tx_gas_limit: !U256::zero(),
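Illustrative sketch (not part of this diff): enabling the new `reseal_on_uncle` option in code; the import path is assumed and the remaining fields fall back to the defaults shown above.

// Hedged example, assuming `ethcore::miner::MinerOptions` with the Default impl above.
use std::time::Duration;
use ethcore::miner::MinerOptions; // assumed import path

fn uncle_resealing_options() -> MinerOptions {
	MinerOptions {
		// New in this change: also reseal when a new uncle block has been imported.
		reseal_on_uncle: true,
		reseal_min_period: Duration::from_secs(5),
		..MinerOptions::default()
	}
}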
@@ -32,7 +32,7 @@
//! use ethcore::miner::{Miner, MinerService};
//!
//! fn main() {
-//!		let miner: Miner = Miner::with_spec(&ethereum::new_foundation());
+//!		let miner: Miner = Miner::with_spec(&ethereum::new_foundation(&env::temp_dir()));
//!		// get status
//!		assert_eq!(miner.status().transactions_in_pending_queue, 0);
//!
@@ -59,7 +59,7 @@ lazy_static! {
/// `native_contracts::test_contracts::ValidatorSet` provides a native wrapper for the ABi.
fn spec_fixed_to_contract() -> Spec {
	let data = include_bytes!("test_validator_contract.json");
-	Spec::load(&data[..]).unwrap()
+	Spec::load(&::std::env::temp_dir(), &data[..]).unwrap()
}

// creates an account provider, filling it with accounts from all the given
@@ -158,7 +158,7 @@ pub struct Spec {
	genesis_state: PodState,
}

-fn load_from(s: ethjson::spec::Spec) -> Result<Spec, Error> {
+fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result<Spec, Error> {
	let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect();
	let g = Genesis::from(s.genesis);
	let GenericSeal(seal_rlp) = g.seal.into();
@@ -166,7 +166,7 @@ fn load_from(s: ethjson::spec::Spec) -> Result<Spec, Error> {

	let mut s = Spec {
		name: s.name.clone().into(),
-		engine: Spec::engine(s.engine, params, builtins),
+		engine: Spec::engine(cache_dir, s.engine, params, builtins),
		data_dir: s.data_dir.unwrap_or(s.name).into(),
		nodes: s.nodes.unwrap_or_else(Vec::new),
		parent_hash: g.parent_hash,
@@ -195,18 +195,26 @@ fn load_from(s: ethjson::spec::Spec) -> Result<Spec, Error> {

macro_rules! load_bundled {
	($e:expr) => {
-		Spec::load(include_bytes!(concat!("../../res/", $e, ".json")) as &[u8]).expect(concat!("Chain spec ", $e, " is invalid."))
+		Spec::load(
+			&::std::env::temp_dir(),
+			include_bytes!(concat!("../../res/", $e, ".json")) as &[u8]
+		).expect(concat!("Chain spec ", $e, " is invalid."))
	};
}

impl Spec {
	/// Convert engine spec into a arc'd Engine of the right underlying type.
	/// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
-	fn engine(engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Arc<Engine> {
+	fn engine<T: AsRef<Path>>(
+		cache_dir: T,
+		engine_spec: ethjson::spec::Engine,
+		params: CommonParams,
+		builtins: BTreeMap<Address, Builtin>,
+	) -> Arc<Engine> {
		match engine_spec {
			ethjson::spec::Engine::Null => Arc::new(NullEngine::new(params, builtins)),
			ethjson::spec::Engine::InstantSeal(instant) => Arc::new(InstantSeal::new(params, instant.params.registrar.map_or_else(Address::new, Into::into), builtins)),
-			ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(params, From::from(ethash.params), builtins)),
+			ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(cache_dir, params, From::from(ethash.params), builtins)),
			ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
			ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Failed to start AuthorityRound consensus engine."),
			ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(params, From::from(tendermint.params), builtins).expect("Failed to start the Tendermint consensus engine."),
@@ -397,13 +405,13 @@ impl Spec {

	/// Loads spec from json file. Provide factories for executing contracts and ensuring
	/// storage goes to the right place.
-	pub fn load<R>(reader: R) -> Result<Self, String> where R: Read {
+	pub fn load<T: AsRef<Path>, R>(cache_dir: T, reader: R) -> Result<Self, String> where R: Read {
		fn fmt<F: ::std::fmt::Display>(f: F) -> String {
			format!("Spec json is invalid: {}", f)
		}

		ethjson::spec::Spec::load(reader).map_err(fmt)
-			.and_then(|x| load_from(x).map_err(fmt))
+			.and_then(|x| load_from(cache_dir, x).map_err(fmt))
	}

	/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus.
@@ -453,7 +461,7 @@ mod tests {
	// https://github.com/paritytech/parity/issues/1840
	#[test]
	fn test_load_empty() {
-		assert!(Spec::load(&[] as &[u8]).is_err());
+		assert!(Spec::load(::std::env::temp_dir(), &[] as &[u8]).is_err());
	}

	#[test]
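Illustrative usage sketch (not part of this diff): `Spec::load` now takes a cache directory before the reader; a caller that previously passed only a reader would look roughly like this. The file name and `ethcore::spec::Spec` path are placeholders.

// Hedged sketch of the new two-argument Spec::load; error handling simplified.
use std::fs::File;
use ethcore::spec::Spec; // assumed import path

fn load_chain_spec() -> Result<Spec, String> {
	let file = File::open("chain.json").map_err(|e| e.to_string())?;
	// A scratch directory is enough for the engine cache; the bundled specs in
	// this diff pass ::std::env::temp_dir() the same way.
	Spec::load(&::std::env::temp_dir(), file)
}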
@@ -52,7 +52,7 @@ fn imports_from_empty() {
#[test]
fn should_return_registrar() {
	let dir = RandomTempPath::new();
-	let spec = ethereum::new_morden();
+	let spec = ethereum::new_morden(&dir);
	let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
	let client_db = Arc::new(Database::open(&db_config, dir.as_path().to_str().unwrap()).unwrap());

@@ -192,14 +192,15 @@ impl ExecutiveVMTracer {
}

impl VMTracer for ExecutiveVMTracer {
-	fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: &U256) -> bool {
+	fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool { true }
+
+	fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: U256) {
		self.data.operations.push(VMOperation {
			pc: pc,
			instruction: instruction,
-			gas_cost: gas_cost.clone(),
+			gas_cost: gas_cost,
			executed: None,
		});
-		true
	}

	fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem_diff: Option<(usize, &[u8])>, store_diff: Option<(U256, U256)>) {
@@ -221,7 +222,7 @@ impl VMTracer for ExecutiveVMTracer {
		}}
	}

-	fn done_subtrace(&mut self, sub: Self, _is_successful: bool) {
+	fn done_subtrace(&mut self, sub: Self) {
		self.data.subs.push(sub.data);
	}

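Illustrative sketch (not part of this diff): with the success flag removed from `done_subtrace`, a caller simply pairs the two calls around nested execution and hands the subtrace back unconditionally; any outcome-dependent fixup now belongs inside the tracer. `run_nested` below is a stand-in for the recursive call into the VM.

// Hedged sketch of the expected prepare_subtrace/done_subtrace pairing.
fn trace_nested<T: VMTracer, E>(
	vm_tracer: &mut T,
	code: &[u8],
	run_nested: impl FnOnce(&mut T) -> Result<(), E>,
) -> Result<(), E> {
	let mut sub = vm_tracer.prepare_subtrace(code);
	let res = run_nested(&mut sub);
	// No is_successful argument any more: the subtrace is returned as-is.
	vm_tracer.done_subtrace(sub);
	res
}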
@@ -87,18 +87,23 @@ pub trait Tracer: Send {

/// Used by executive to build VM traces.
pub trait VMTracer: Send {
-	/// Trace the preparation to execute a single instruction.
-	/// @returns true if `trace_executed` should be called.
-	fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: &U256) -> bool { false }
-
-	/// Trace the finalised execution of a single instruction.
+	/// Trace the progression of interpreter to next instruction.
+	/// If tracer returns `false` it won't be called again.
+	/// @returns true if `trace_prepare_execute` and `trace_executed` should be called.
+	fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool { false }
+
+	/// Trace the preparation to execute a single valid instruction.
+	fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256) {}
+
+	/// Trace the finalised execution of a single valid instruction.
	fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem_diff: Option<(usize, &[u8])>, _store_diff: Option<(U256, U256)>) {}

	/// Spawn subtracer which will be used to trace deeper levels of execution.
	fn prepare_subtrace(&self, code: &[u8]) -> Self where Self: Sized;

	/// Finalize subtracer.
-	fn done_subtrace(&mut self, sub: Self, is_successful: bool) where Self: Sized;
+	fn done_subtrace(&mut self, sub: Self) where Self: Sized;

	/// Consumes self and returns the VM trace.
	fn drain(self) -> Option<VMTrace>;
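Illustrative sketch (not part of this diff): a minimal tracer against the revised `VMTracer` surface, using `trace_next_instruction` as the opt-out gate and the now non-boolean `trace_prepare_execute`. The tracer itself is hypothetical.

// Hedged example: a budgeted tracer. Once `budget` instructions have been seen,
// trace_next_instruction returns false and the interpreter stops calling it.
struct BudgetTracer {
	budget: usize,
	seen: usize,
}

impl VMTracer for BudgetTracer {
	fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool {
		self.seen += 1;
		self.seen <= self.budget
	}

	fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256) {}

	fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem_diff: Option<(usize, &[u8])>, _store_diff: Option<(U256, U256)>) {}

	fn prepare_subtrace(&self, _code: &[u8]) -> Self { BudgetTracer { budget: self.budget, seen: 0 } }

	fn done_subtrace(&mut self, _sub: Self) {}

	fn drain(self) -> Option<VMTrace> { None }
}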
@@ -71,18 +71,15 @@ impl Tracer for NoopTracer {
pub struct NoopVMTracer;

impl VMTracer for NoopVMTracer {
-	/// Trace the preparation to execute a single instruction.
-	fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: &U256) -> bool { false }
+	fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool { false }
+
+	fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256) {}

-	/// Trace the finalised execution of a single instruction.
	fn trace_executed(&mut self, _gas_used: U256, _stack_push: &[U256], _mem_diff: Option<(usize, &[u8])>, _store_diff: Option<(U256, U256)>) {}

-	/// Spawn subtracer which will be used to trace deeper levels of execution.
	fn prepare_subtrace(&self, _code: &[u8]) -> Self { NoopVMTracer }

-	/// Spawn subtracer which will be used to trace deeper levels of execution.
-	fn done_subtrace(&mut self, _sub: Self, _is_successful: bool) {}
+	fn done_subtrace(&mut self, _sub: Self) {}

-	/// Consumes self and returns all VM traces.
	fn drain(self) -> Option<VMTrace> { None }
}
@@ -17,7 +17,7 @@
//! Verification queue info types

/// Verification queue status
-#[derive(Debug)]
+#[derive(Debug, Clone)]
#[cfg_attr(feature = "ipc", binary)]
pub struct VerificationQueueInfo {
	/// Number of queued items pending verification
@@ -132,12 +132,17 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &
		}
	}

+	let mut verified = HashSet::new();
	for uncle in UntrustedRlp::new(bytes).at(2)?.iter().map(|rlp| rlp.as_val::<Header>()) {
		let uncle = uncle?;
		if excluded.contains(&uncle.hash()) {
			return Err(From::from(BlockError::UncleInChain(uncle.hash())))
		}

+		if verified.contains(&uncle.hash()) {
+			return Err(From::from(BlockError::DuplicateUncle(uncle.hash())))
+		}
+
		// m_currentBlock.number() - uncle.number()          m_cB.n - uP.n()
		// 1                                                2
		// 2
@@ -180,6 +185,7 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &

			verify_parent(&uncle, &uncle_parent)?;
			engine.verify_block_family(&uncle, &uncle_parent, Some(bytes))?;
+			verified.insert(uncle.hash());
		}
	}
	Ok(())
@@ -568,6 +574,11 @@ mod tests {
		check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine, &bc),
			TooManyUncles(OutOfBounds { max: Some(engine.maximum_uncle_count()), min: None, found: bad_uncles.len() }));

+		header = good.clone();
+		bad_uncles = vec![ good_uncle1.clone(), good_uncle1.clone() ];
+		check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine, &bc),
+			DuplicateUncle(good_uncle1.hash()));
+
		// TODO: some additional uncle checks
	}

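Illustrative sketch (not part of this diff): the duplicate-uncle guard above is the standard `HashSet` pattern; in isolation, and stripped of block-specific types, it looks like this.

// Hedged, generic sketch of the duplicate check: HashSet::insert returns false
// when the value was already present, so the first duplicate is reported once.
use std::collections::HashSet;
use std::hash::Hash;

fn first_duplicate<T: Eq + Hash + Clone>(items: &[T]) -> Option<T> {
	let mut seen = HashSet::new();
	items.iter().find(|item| !seen.insert((*item).clone())).cloned()
}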
@@ -81,13 +81,18 @@ impl vm::Informant for Informant {
}

impl trace::VMTracer for Informant {
-	fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: &U256) -> bool {
+	fn trace_next_instruction(&mut self, pc: usize, instruction: u8) -> bool {
		self.pc = pc;
		self.instruction = instruction;
-		self.gas_cost = *gas_cost;
		true
	}

+	fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: U256) {
+		self.pc = pc;
+		self.instruction = instruction;
+		self.gas_cost = gas_cost;
+	}
+
	fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem_diff: Option<(usize, &[u8])>, store_diff: Option<(U256, U256)>) {
		let info = evm::INSTRUCTIONS[self.instruction as usize];

@@ -127,17 +132,9 @@ impl trace::VMTracer for Informant {
		vm
	}

-	fn done_subtrace(&mut self, mut sub: Self, is_successful: bool) where Self: Sized {
+	fn done_subtrace(&mut self, mut sub: Self) {
		if sub.depth == 1 {
			// print last line with final state:
-			if is_successful {
-				sub.pc += 1;
-				sub.instruction = 0;
-			} else {
-				let push_bytes = evm::push_bytes(sub.instruction);
-				sub.pc += if push_bytes > 0 { push_bytes + 1 } else { 0 };
-				sub.instruction = if sub.pc < sub.code.len() { sub.code[sub.pc] } else { 0 };
-			}
			sub.gas_cost = 0.into();
			let gas_used = sub.gas_used;
			trace::VMTracer::trace_executed(&mut sub, gas_used, &[], None, None);
@@ -44,6 +44,6 @@ impl vm::Informant for Informant {

impl trace::VMTracer for Informant {
	fn prepare_subtrace(&self, _code: &[u8]) -> Self where Self: Sized { Default::default() }
-	fn done_subtrace(&mut self, _sub: Self, _is_successful: bool) where Self: Sized {}
+	fn done_subtrace(&mut self, _sub: Self) {}
	fn drain(self) -> Option<trace::VMTrace> { None }
}
@@ -137,7 +137,7 @@ impl Args {
				spec::Spec::load(file)?
			},
			None => {
-				spec::Spec::new_instant()
+				ethcore::ethereum::new_foundation()
			},
		})
	}
js/package-lock.json (generated, 37 changed lines)
@@ -281,9 +281,9 @@
      "dev": true
    },
    "asap": {
-      "version": "2.0.5",
-      "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.5.tgz",
-      "integrity": "sha1-UidltQw1EEkOUtfc/ghe+bqWlY8="
+      "version": "2.0.6",
+      "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
+      "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY="
    },
    "asn1": {
      "version": "0.2.3",
@@ -5738,6 +5738,11 @@
      "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz",
      "integrity": "sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ="
    },
+    "lodash.toarray": {
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/lodash.toarray/-/lodash.toarray-4.4.0.tgz",
+      "integrity": "sha1-JMS/zWsvuji/0FlNsRedjptlZWE="
+    },
    "lodash.uniq": {
      "version": "4.5.0",
      "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
@@ -5846,6 +5851,26 @@
      "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz",
      "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0="
    },
+    "markdown-loader": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/markdown-loader/-/markdown-loader-2.0.0.tgz",
+      "integrity": "sha1-Qhhi04xCJP02FetkgBfqOFtWLXg=",
+      "dev": true,
+      "dependencies": {
+        "loader-utils": {
+          "version": "0.2.17",
+          "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-0.2.17.tgz",
+          "integrity": "sha1-+G5jdNQyBabmxg6RlvF8Apm/s0g=",
+          "dev": true
+        }
+      }
+    },
+    "marked": {
+      "version": "0.3.6",
+      "resolved": "https://registry.npmjs.org/marked/-/marked-0.3.6.tgz",
+      "integrity": "sha1-ssbGGPzOzk74bE/Gy4p8v1rtqNc=",
+      "dev": true
+    },
    "material-ui": {
      "version": "0.16.5",
      "resolved": "https://registry.npmjs.org/material-ui/-/material-ui-0.16.5.tgz",
@@ -6164,9 +6189,9 @@
      "dev": true
    },
    "node-emoji": {
-      "version": "1.6.1",
-      "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.6.1.tgz",
-      "integrity": "sha512-cMmAHLDT/QHCSuHBL3vQ8qXwFypx/3zN1sForcR65OM8FbC+LKefUCI29NAik4Crpiv2byyWj1SWZRlNBtbi2g=="
+      "version": "1.7.0",
+      "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.7.0.tgz",
+      "integrity": "sha512-dYx345sjhPJUpWaVQKjP0/43y+nTcfBRTZfSciM3ZEbRGaU/9AKaHBPf7AJ9vOKcK0W3v67AgI4m4oo02NLHhQ=="
    },
    "node-fetch": {
      "version": "1.6.3",
@@ -122,6 +122,7 @@
    "istanbul": "1.0.0-alpha.2",
    "jsdom": "9.11.0",
    "json-loader": "0.5.4",
+    "markdown-loader": "2.0.0",
    "mocha": "3.2.0",
    "mock-local-storage": "1.0.2",
    "mock-socket": "6.0.4",
@@ -16,147 +16,25 @@

import React, { PropTypes } from 'react';
import { FormattedMessage } from 'react-intl';
+import ReactMarkdown from 'react-markdown';

import { Checkbox } from '@parity/ui';

import styles from '../firstRun.css';

+let tnc = '';
+
+if (process.env.NODE_ENV !== 'test') {
+  tnc = require('./tnc.md');
+}
+
export default function TnC ({ hasAccepted, onAccept }) {
  return (
    <div className={ styles.tnc }>
-      <h1>SECURITY WARNINGS</h1>
-      <ul>
-        <li>You are responsible for your own computer security. If your machine is compromised you will lose your ether, access to any contracts and maybe more.</li>
-        <li>You are responsible for your own actions. If you mess something up or break any laws while using this software, it is your fault, and your fault only.</li>
-      </ul>
+      <ReactMarkdown
+        className={ styles.markdown }
+        source={ tnc }
+      />
|
|
||||||
<h1>LEGAL WARNING</h1>
|
|
||||||
<h2>SHORT VERSION</h2>
|
|
||||||
<h3>Disclaimer of Liability and Warranties</h3>
|
|
||||||
<ul>
|
|
||||||
<li>The user expressly knows and agrees that the user is using Parity at the user’s sole risk.</li>
|
|
||||||
<li>The user represents that the user has an adequate understanding of the risks, usage and intricacies of cryptographic tokens and blockchain-based open source software, eth platform and eth.</li>
|
|
||||||
<li>The user acknowledges and agrees that, to the fullest extent permitted by any applicable law, the disclaimers of liability contained herein apply to any and all damages or injury whatsoever caused by or related to risks of, use of, or inability to use, Parity under any cause or action whatsoever of any kind in any jurisdiction, including, without limitation, actions for breach of warranty, breach of contract or tort (including negligence) and that Eth Core Limited shall be not liable for any indirect, incidental, special, exemplary or consequential damages, including for loss of profits, goodwill or data.</li>
|
|
||||||
<li>Some jurisdictions do not allow the exclusion of certain warranties or the limitation or exclusion of liability for certain types of damages. Therefore, some of the above limitations in this section may not apply to a user. In particular, nothing in these terms shall affect the statutory rights of any user or exclude injury arising from any wilful misconduct or fraud of Eth Core Limited.</li>
|
|
||||||
<li>All rights reserved by Parity. Licensed to the public under the GPL v3 <a href='https://www.gnu.org/licenses/gpl-3.0.txt' target='_blank'>https://www.gnu.org/licenses/gpl-3.0.txt</a></li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<h2>LONG VERSION</h2>
|
|
||||||
<p>The following Terms and Conditions (“Terms”) govern the use of Parity Technologies Limited’s open source software product (“Parity”). Prior to any use of the Parity or any of Parity Technologies Limited’s products (“Parity’s Products”), the user or anyone on whose behalf the software is used for directly or indirectly (“User”) confirms that they understand and expressly agree to all of the Terms. All capitalized terms in this agreement will be given the same effect and meaning as in the Terms. The group of developers and other personnel that is now, or will be, employed by, or contracted with, or affiliated with, Parity Technologies Limited (“Parity”) is termed the “Parity Team”.</p>
|
|
||||||
|
|
||||||
<h3>Acknowledgement of Risks</h3>
|
|
||||||
<p>The user acknowledges the following serious risks to any use Parity and expressly agrees not to hold liable Parity or the Parity Team should any of these risks occur:</p>
|
|
||||||
|
|
||||||
<h3>Risk of Security Weaknesses in the Parity Core Infrastructure Software</h3>
|
|
||||||
<p>Parity rests on open-source software, and although it is professionally developed in line with industry standards (which include external audits of the code base), there is a risk that Parity or the Parity Team, may have introduce unintentional weaknesses or bugs into the core infrastructural elements of Parity causing the system to lose ETH stored in one or more User accounts or other accounts or lose sums of other valued tokens.</p>
|
|
||||||
|
|
||||||
<h3>Risk of Weaknesses or Exploitable Breakthroughs in the Field of Cryptography</h3>
|
|
||||||
<p>Cryptography is an art, not a science. And the state of the art can advance over time Advances in code cracking, or technical advances such as the development of quantum computers, could present risks to cryptocurrencies and Parity, which could result in the theft or loss of ETH. To the extent possible, Parity intends to update the protocol underlying Parity to account for any advances in cryptography and to incorporate additional security measures, but it cannot predict the future of cryptography or guaranty that any security updates will be made, timely or successful.</p>
|
|
||||||
|
|
||||||
<h3>Risk of Ether Mining Attacks</h3>
|
|
||||||
<p>As with other cryptocurrencies, the blockchain used by Parity is susceptible to mining attacks, including but not limited to double-spend attacks, majority mining power attacks, “selfish-mining” attacks, and race condition attacks. Any successful attacks present a risk to the Ethereum ecosystem, expected proper execution and sequencing of ETH transactions, and expected proper execution and sequencing of contract computations. Despite the efforts of the Parity and the Parity Team, known or novel mining attacks may be successful.</p>
|
|
||||||
|
|
||||||
<h3>Risk of Rapid Adoption and Insufficiency of Computational Application Processing Power on the Ethereum Network</h3>
|
|
||||||
<p>If Parity is rapidly adopted, the demand for transaction processing and decentralized application computations could rise dramatically and at a pace that exceeds the rate with which ETH miners can bring online additional mining power. Under such a scenario, the entire Ethereum ecosystem could become destabilized, due to the increased cost of running decentralized applications. In turn, this could dampen interest in the Ethereum ecosystem and ETH. Insufficiency of computational resources and an associated rise in the price of ETH could result in businesses being unable to acquire scarce computational resources to run their decentralized applications. This would represent revenue losses to businesses or worst case, cause businesses to cease operations because such operations have become uneconomical due to distortions in the crypto-economy.</p>
|
|
||||||
|
|
||||||
<h3>Risk of temporary network incoherence</h3>
|
|
||||||
<p>We recommend any groups handling large or important transactions to maintain a voluntary 24 hour waiting period on any ether deposited. In case the integrity of the network is at risk due to issues in the clients, we will endeavour to publish patches in a timely fashion to address the issues. We will endeavour to provide solutions within the voluntary 24 hour waiting period.</p>
|
|
||||||
|
|
||||||
<h3>Use of Parity by you</h3>
|
|
||||||
<p>You agree to use Parity only for purposes that are permitted by (a) these Terms and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions (including any laws regarding the export of data or software to and from the United Kingdom or other relevant countries).</p>
|
|
||||||
<p>You agree that you will not engage in any activity that interferes with or disrupts Parity’s or Parity’s Products’ functioning (or the networks which are connected to Parity).</p>
|
|
||||||
<p>Unless you have been specifically permitted to do so in a separate agreement with Parity, you agree that you will not reproduce, duplicate, copy, sell, trade or resell the Parity’s Products for any purpose unless than in accordance to the terms of the software licence terms available here: <a href='https://www.gnu.org/licenses/gpl-3.0.txt' target='_blank'>https://www.gnu.org/licenses/gpl-3.0.txt</a> (“Software Licence Terms”).</p>
|
|
||||||
<p>You agree that you are solely responsible for (and that Parity has no responsibility to you or to any third party for) any breach of your obligations under these terms and for the consequences (including any loss or damage which Parity may suffer) of any such breach.</p>
|
|
||||||
|
|
||||||
<h3>Privacy and your personal information</h3>
|
|
||||||
<p>You agree to the use of your data (if any is gathered) in accordance with Parity’s privacy policies: <a href='https://parity.io/legal.html' target='_blank'>https://parity.io/legal.html</a>. This policy explains how Parity treats your personal information (if any is gathered), and protects your privacy, when you use Parity’s Products.</p>
|
|
||||||
|
|
||||||
<h3>Content in Parity</h3>
|
|
||||||
<p>You understand that all information and data (such as smart contracts, data files, written text, computer software, music, audio files or other sounds, photographs, videos or other images) which you may have access to as part of, or through your use of, Parity’s Product are the sole responsibility of the person from which such content originated. All such information is referred to below as the “Content”.</p>
|
|
||||||
<p>You should be aware that Content presented to you through Parity or Parity’s Product may be protected by intellectual property rights which are owned by thisrd parties who may provide that Content to Parity (or by other persons or companies on their behalf). You may not modify, rent, lease, loan, sell, distribute or create derivative works based on this Content (either in whole or in part) unless you have been specifically told that you may do so by Parity or by the owners of that Content, in a separate agreement.</p>
|
|
||||||
<p>You understand that by using Parity or Parity’s Products you may be exposed to Content that you may find offensive, indecent or objectionable and that, in this respect, you use Parity or Parity’s Products at your own risk.</p>
|
|
||||||
<p>You agree that you are solely responsible for (and that Parity has no responsibility to you or to any third party for) any Content that you create, transmit or display while using Parity or Parity’s Products and for the consequences of your actions (including any loss or damage which Parity may suffer) by doing so.</p>
|
|
||||||
|
|
||||||
<h3>Proprietary rights</h3>
|
|
||||||
<p>You acknowledge and agree that Parity own all legal right, title and interest in and to the Parity and Parity’s Products, including any intellectual property rights which subsist in Parity and Parity’s Products (whether those rights happen to be registered or not, and wherever in the world those rights may exist).</p>
|
|
||||||
<p>Unless you have agreed otherwise in writing with Parity, nothing in the Terms gives you a right to use any of Parity’s trade names, trade marks, service marks, logos, domain names, and other distinctive brand features.</p>
|
|
||||||
<p>If you have been given an explicit right to use any of these brand features in a separate written agreement with Parity, then you agree that your use of such features shall be in compliance with that agreement, any applicable provisions of these terms, and Parity’s brand feature use guidelines as updated from time to time. These guidelines can be viewed online at <a href='https://parity.io/press.html' target='_blank'>https://parity.io/press.html</a>.</p>
|
|
||||||
<p>Parity acknowledges and agrees that it obtains no right, title or interest from you (or your licensors) under these terms in or to any content that you submit, post, transmit or display on, or through, Parity, including any intellectual property rights which subsist in that content (whether those rights happen to be registered or not, and wherever in the world those rights may exist). Unless you have agreed otherwise in writing with Parity, you agree that you are responsible for protecting and enforcing those rights and that Parity has no obligation to do so on your behalf.</p>
|
|
||||||
<p>You agree that you shall not remove, obscure, or alter any proprietary rights notices (including copyright and trade mark notices) which may be affixed to or contained within Parity or Parity’s Products.</p>
|
|
||||||
<p>Unless you have been expressly authorized to do so in writing by Parity, you agree that in using Parity, you will not use any trade mark, service mark, trade name, logo of any company or organization in a way that is likely or intended to cause confusion about the owner or authorized user of such marks, names or logos.</p>
|
|
||||||
|
|
||||||
<h3>License Restrictions from Parity</h3>
|
|
||||||
<p>You may not (and you may not permit anyone else to) copy, modify, create a derivative work of, reverse engineer, decompile or otherwise attempt to extract the source code of the Parity, Parity’s Products or any part thereof, unless this is expressly permitted by our Software Licence Terms or required by law, or unless you have been specifically told that you may do so by Parity, in writing.</p>
|
|
||||||
<p>Unless Parity has given you specific written permission to do so, you may not assign (or grant a sub-licence of) your rights to use Parity’s Products, grant a security interest in or over your rights to use the Parity’s Products, or otherwise transfer any part of your rights to use the Parity’s Products.</p>
|
|
||||||
|
|
||||||
<h3>Content licence from you</h3>
|
|
||||||
<p>You retain copyright and any other rights you already hold in content which you submit, post or display on or through, Parity.</p>
|
|
||||||
|
|
||||||
<h3>Ending your relationship with Parity</h3>
|
|
||||||
<p>The Terms will continue to apply until terminated by either you or Parity as set out below.</p>
|
|
||||||
<p>Parity may at any time, terminate its legal agreement with you if:</p>
|
|
||||||
<ol>
|
|
||||||
<li>you have breached any provision of these Terms (or have acted in manner which clearly shows that you do not intend to, or are unable to comply with the provisions of these terms); or</li>
|
|
||||||
<li>Parity is required to do so by law (for example, where the provision of Parity’s Product to you is, or becomes, unlawful); or</li>
|
|
||||||
<li>the partner with whom Parity offered products or services to you has terminated its relationship with Parity or ceased to offer products or services to you; or</li>
|
|
||||||
<li>Parity is transitioning to no longer providing products or services to users in the country in which you are resident or from which you use the service; or</li>
|
|
||||||
<li>the provision of products or services to you by Parity is, in Parity’s opinion, no longer commercially viable.</li>
|
|
||||||
<li>When these Terms come to an end, all of the legal rights, obligations and liabilities that you and Parity have benefited from, been subject to (or which have accrued over time whilst the Terms have been in force) or which are expressed to continue indefinitely, shall be unaffected by this cessation, and the England and Wales jurisdiction choice shall continue to apply to such rights, obligations and liabilities indefinitely.</li>
|
|
||||||
</ol>
|
|
||||||
|
|
||||||
<h3>ACKNOWLEDGEMENT AND ACCEPTANCE OF ALL RISKS, EXCLUSION OF WARRANTIES</h3>
|
|
||||||
<p>THE USER EXPRESSLY KNOWS AND AGREES THAT THE USER IS USING PARITY OR PARITY’S PRODUCTS AT THE USER’S SOLE RISK. THE USER REPRESENTS THAT THE USER HAS AN ADEQUATE UNDERSTANDING OF THE RISKS, USAGE AND INTRICACIES OF CRYPTOGRAPHIC TOKENS AND BLOCKCHAIN-BASED OPEN SOURCE SOFTWARE, PARITY.</p>
|
|
||||||
<p>YOU EXPRESSLY UNDERSTAND AND AGREE THAT YOUR USE OF PARITY’S PRODUCTS IS AT YOUR SOLE RISK AND THAT PARITY’S PRODUCTS ARE PROVIDED "AS IS" AND “AS AVAILABLE.”</p>
|
|
||||||
<p>IN PARTICULAR, PARITY, ITS SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS DO NOT REPRESENT OR WARRANT TO YOU THAT:</p>
|
|
||||||
<p>(A) YOUR USE OF PARITY OR PARITY’S PRODUCTS WILL MEET YOUR REQUIREMENTS,</p>
|
|
||||||
<p>(B) YOUR USE OF PARITY OR PARITY’S PRODUCTS WILL BE UNINTERRUPTED, TIMELY, SECURE OR FREE FROM ERROR,</p>
|
|
||||||
<p>(C) ANY INFORMATION OBTAINED BY YOU AS A RESULT OF YOUR USE OF PARITY OR PARITY’S PRODUCTS WILL BE ACCURATE OR RELIABLE, AND</p>
|
|
||||||
<p>(D) THAT DEFECTS IN THE OPERATION OR FUNCTIONALITY OF ANY SOFTWARE PROVIDED TO YOU AS PART OF PARITY’S PRODUCTS WILL BE CORRECTED.</p>
|
|
||||||
<p>ANY MATERIAL DOWNLOADED OR OTHERWISE OBTAINED THROUGH THE USE OF PARITY OR PARITY’S PRODUCTS IS DONE AT YOUR OWN DISCRETION AND RISK AND THAT YOU WILL BE SOLELY RESPONSIBLE FOR ANY DAMAGE TO YOUR COMPUTER SYSTEM OR OTHER DEVICE OR LOSS OF DATA OR ECONOMIC LOSS THAT RESULTS FROM THE DOWNLOAD OF ANY SUCH MATERIAL.</p>
|
|
||||||
<p>NO ADVICE OR INFORMATION, WHETHER ORAL OR WRITTEN, OBTAINED BY YOU FROM PARITY OR THROUGH OR FROM PARITY’S PRODUCTS SHALL CREATE ANY WARRANTY NOT EXPRESSLY STATED IN THE TERMS.</p>
|
|
||||||
<p>PARITY FURTHER EXPRESSLY DISCLAIMS ALL WARRANTIES AND CONDITIONS OF ANY KIND, WHETHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES AND CONDITIONS OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.</p>
|
|
||||||
|
|
||||||
<h3>EXCLUSION AND LIMITATION OF LIABILITY</h3>
|
|
||||||
<p>THE USER ACKNOWLEDGES AND AGREES THAT, TO THE FULLEST EXTENT PERMITTED BY ANY APPLICABLE LAW, THE DISCLAIMERS AND EXCLUSION OF LIABILITY CONTAINED HEREIN APPLY TO ANY AND ALL DAMAGES OR INJURY WHATSOEVER CAUSED BY OR RELATED TO RISKS OF, USE OF, OR INABILITY TO USE, PARITY UNDER ANY CAUSE OF ACTION WHATSOEVER OF ANY KIND IN ANY JURISDICTION, INCLUDING, WITHOUT LIMITATION, ACTIONS FOR BREACH OF WARRANTY, BREACH OF CONTRACT OR TORT (INCLUDING NEGLIGENCE) AND THAT NEITHER PARITY NOR THE PARITY TEAM SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES, INCLUDING FOR LOSS OF PROFITS, GOODWILL OR DATA.</p>
|
|
||||||
<p>SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF CERTAIN WARRANTIES OR THE LIMITATION OR EXCLUSION OF LIABILITY FOR CERTAIN TYPES OF DAMAGES. THEREFORE, SOME OF THE ABOVE LIMITATIONS IN THIS SECTION MAY NOT APPLY TO A USER. IN PARTICULAR, NOTHING IN THESE TERMS SHALL AFFECT THE STATUTORY RIGHTS OF ANY USER OR EXCLUDE INJURY ARISING FROM ANY WILLFUL MISCONDUCT OR FRAUD OF PARITY.</p>
|
|
||||||
<p>SUBJECT TO ANY LIABILITY WHICH MAY NOT BE EXCLUDED, YOU EXPRESSLY UNDERSTAND AND AGREE THAT PARITY, ITS SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS SHALL NOT BE LIABLE TO YOU FOR:</p>
|
|
||||||
<p>(A) ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL CONSEQUENTIAL OR EXEMPLARY DAMAGES WHICH MAY BE INCURRED BY YOU, HOWEVER CAUSED AND UNDER ANY THEORY OF LIABILITY. THIS SHALL INCLUDE, BUT NOT BE LIMITED TO, ANY LOSS OF PROFIT (WHETHER INCURRED DIRECTLY OR INDIRECTLY), ANY LOSS OF GOODWILL OR BUSINESS REPUTATION, ANY LOSS OF DATA SUFFERED, COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR OTHER INTANGIBLE LOSS;</p>
|
|
||||||
<p>(B) ANY LOSS OR DAMAGE WHICH MAY BE INCURRED BY YOU, INCLUDING BUT NOT LIMITED TO LOSS OR DAMAGE AS A RESULT OF:</p>
|
|
||||||
<p>(I) ANY RELIANCE PLACED BY YOU ON THE COMPLETENESS, ACCURACY OR EXISTENCE OF ANY ADVERTISING, OR AS A RESULT OF ANY RELATIONSHIP OR TRANSACTION BETWEEN YOU AND ANY ADVERTISER OR SPONSOR WHOSE ADVERTISING APPEARS ON PARITY’S PRODUCTS;</p>
|
|
||||||
<p>(II) ANY CHANGES WHICH PARITY MAY MAKE TO PARITY’S PRODUCTS, OR FOR ANY PERMANENT OR TEMPORARY CESSATION IN THE PROVISION OF PARITY’S PRODUCTS (OR ANY FEATURES WITHIN PARITY’S PRODUCTS);</p>
|
|
||||||
<p>(III) THE DELETION OF, CORRUPTION OF, OR FAILURE TO STORE, ANY CONTENT AND OTHER COMMUNICATIONS DATA MAINTAINED OR TRANSMITTED BY OR THROUGH YOUR USE OF PARITY’S PRODUCTS;</p>
|
|
||||||
<p>(IV) YOUR FAILURE TO PROVIDE PARITY WITH ACCURATE ACCOUNT INFORMATION (IF THIS IS REQUIRED);</p>
|
|
||||||
<p>(V) YOUR FAILURE TO KEEP YOUR PASSWORD OR ACCOUNT DETAILS SECURE AND CONFIDENTIAL;</p>
|
|
||||||
<p>THE LIMITATIONS ON PARITY’S LIABILITY TO YOU SHALL APPLY WHETHER OR NOT PARITY HAS BEEN ADVISED OF OR SHOULD HAVE BEEN AWARE OF THE POSSIBILITY OF ANY SUCH LOSSES ARISING.</p>
|
|
||||||
|
|
||||||
<h3>Copyright and trade mark policies</h3>
|
|
||||||
<p>It is Parity’s policy to respond to notices of alleged copyright infringement that comply with applicable international intellectual property law (including, in the United States, the Digital Millennium Copyright Act) and if Parity is put on notice and it is under Parity’s control and terminating the accounts of repeat infringers.</p>
|
|
||||||
|
|
||||||
<h3>Other content</h3>
|
|
||||||
<p>Services provided may include hyperlinks to other web sites, smart contracts or content or resources. Parity may have no control over any web sites or resources which are provided by companies or persons other than Parity.</p>
|
|
||||||
<p>You acknowledge and agree that Parity is not responsible for the availability of any such external sites or resources, and does not endorse any advertising, products or other materials on or available from such web sites or resources.</p>
|
|
||||||
<p>You acknowledge and agree that Parity is not liable for any loss or damage which may be incurred by you as a result of the availability of those external sites or resources, or as a result of any reliance placed by you on the completeness, accuracy or existence of any advertising, products or other materials on, or available from, such web sites or resources.</p>
|
|
||||||
|
|
||||||
<h3>Changes to the Terms</h3>
|
|
||||||
<p>Parity may make changes to these from time to time. When these changes are made, Parity will make a new copy of these terms available at https://parity.io/legal.html and any new terms will be made available to you from within, or through, the affected Parity’s Product.</p>
|
|
||||||
<p>You understand and agree that if you use Parity or Parity’s Products after the date on which the Terms have changed, Parity will treat your use as acceptance of the updated terms.</p>
|
|
||||||
|
|
||||||
<h3>General legal terms</h3>
|
|
||||||
<p>Sometimes when you use Parity or Parity’s Products, you may (as a result of, or in connection with your use of these products) use a service or download a piece of software, or smart contract, or purchase goods, which are provided by another person or company. Your use of these other services, software, smart contract or goods may be subject to separate terms between you and the company or person concerned. If so, these Terms do not affect your legal relationship with these other companies or individuals.</p>
|
|
||||||
<p>These Terms constitute the whole legal agreement between you and Parity and govern your use of Parity and Parity’s Products (but excluding any products or services which Parity may provide to you under a separate written agreement), and completely replace any prior agreements between you and Parity in relation to Parity and Parity’s Products.</p>
|
|
||||||
<p>You agree that Parity may provide you with notices, including those regarding changes to the Terms, by postings on the affected Parity’s Product.</p>
|
|
||||||
<p>You agree that if Parity does not exercise or enforce any legal right or remedy which is contained in these Terms (or which Parity has the benefit of under any applicable law), this will not be taken to be a formal waiver of Parity’s rights and that those rights or remedies will still be available to Parity.</p>
|
|
||||||
<p>If any court of law, having the jurisdiction to decide on this matter, rules that any provision of these Terms is invalid, then that provision will be removed from the Terms without affecting the rest of the Terms. The remaining provisions of the Terms will continue to be valid and enforceable.</p>
|
|
||||||
<p>You acknowledge and agree that each member of the group of companies of which Parity is the parent shall be third party beneficiaries to these Terms and that such other companies shall be entitled to directly enforce, and rely upon, any provision of the Terms which confers a benefit on (or rights in favor of) them. Other than this, no other person or company shall be third party beneficiaries to these Terms.</p>
|
|
||||||
<p>These Terms, and your relationship with Parity under these Terms, shall be governed by the laws of England and Wales, United Kingdom without regard to its conflict of laws provisions. You and Parity agree to submit to the exclusive jurisdiction of the courts located within England, United Kingdom to resolve any legal matter arising from these Terms (subject to the Dispute Resolution clause below). Notwithstanding this, you agree that Parity shall still be allowed to apply for injunctive remedies (or an equivalent type of urgent legal relief) in any jurisdiction.</p>
|
|
||||||
|
|
||||||
<h3>Dispute Resolution</h3>
|
|
||||||
<p>All disputes or claims arising out of, relating to, or in connection with the Terms, the breach thereof, or use of Parity shall be finally settled under the Rules of Arbitration of the International Chamber of Commerce by one or more arbitrators appointed in accordance with said Rules. All claims between the parties relating to these Terms that are capable of being resolved by arbitration, whether sounding in contract, tort, or otherwise, shall be submitted to ICC arbitration. Prior to commencing arbitration, the parties have a duty to negotiate in good faith and attempt to resolve their dispute in a manner other than by submission to ICC arbitration. The arbitration panel shall consist of one arbitrator only, unless the ICC Court of Arbitration determines that the dispute is such as to warrant three arbitrators. If the Court determines that one arbitrator is sufficient, then such arbitrator shall be a UK resident. If the Court determines that three arbitrators are necessary, then each party shall have 30 days to nominate an arbitrator of its choice - in the case of the Claimant, measured from receipt of notification of the ICC Court’s decision to have three arbitrators; in the case of Respondent, measured from receipt of notification of Claimant’s nomination. All nominations must be UK residents. If a party fails to nominate an arbitrator, the Court will do so. The Court shall also appoint the chairman. All arbitrators shall be and remain “independent” of the parties involved in the arbitration. The place of arbitration shall be England, United Kingdom. The language of the arbitration shall be English. In deciding the merits of the dispute, the tribunal shall apply the laws of England and Wales and any discovery shall be limited and shall not involve any depositions or any other examinations outside of a formal hearing. The tribunal shall not assume the powers of amiable compositeur or decide the case ex aequo et bono. In the final award, the tribunal shall fix the costs of the arbitration and decide which of the parties shall bear such costs in what proportion. Every award shall be binding on the parties. The parties undertake to carry out the award without delay and waive their right to any form of recourse against the award in so far as such waiver can validly be made.</p>
|
|
||||||
|
|
||||||
<h3>Additional Terms for Enterprise Use</h3>
|
|
||||||
<p>If you are a business entity, then the individual accepting on behalf of the entity (for the avoidance of doubt, for business entities, in these Terms, "you" means the entity) represents and warrants that he or she has the authority to act on your behalf, that you represent that you are duly authorized to do business in the country or countries where you operate, and that your employees, officers, representatives, and other agents accessing Parity’s Products are duly authorized to access Parity and to legally bind you to these Terms.</p>
|
|
||||||
<p>Subject to these Terms and subject to the Software Licence Terms, Parity grants you a non-exclusive, non-transferable licence to install and use Parity solely on machines intended for use by your employees, officers, representatives, and agents in connection with your business entity, and provided that their use of Parity will be subject to these Terms and Parity’s Products software licence terms.</p>
|
|
||||||
|
|
||||||
<Checkbox
|
<Checkbox
|
||||||
className={ styles.accept }
|
className={ styles.accept }
|
||||||
label={
|
label={
|
||||||
|
215
js/src/shell/FirstRun/TnC/tnc.md
Normal file
215
js/src/shell/FirstRun/TnC/tnc.md
Normal file
@ -0,0 +1,215 @@
|
|||||||
|
# LEGAL WARNING SHORT VERSION
|
||||||
|
|
||||||
|
## Disclaimer of Liability and Warranties
|
||||||
|
|
||||||
|
- The user expressly acknowledges and agrees that Parity Technologies Limited makes the Parity client available to the user at the user's sole risk.
|
||||||
|
|
||||||
|
- The user represents that the user has an adequate understanding of the risks, usage and intricacies of cryptographic tokens and blockchain-based open source software, the Ethereum platform and ETH.
|
||||||
|
|
||||||
|
- The user acknowledges and agrees that, to the fullest extent permitted by any applicable law, the disclaimers of liability contained herein apply to any and all damages or injury whatsoever caused by or related to risks of, use of, or inability to use, the Parity client under any cause or action whatsoever of any kind in any jurisdiction, including, without limitation, actions for breach of warranty, breach of contract or tort (including negligence) and that Parity Technologies Limited shall not be liable for any indirect, incidental, special, exemplary or consequential damages, including for loss of profits, goodwill or data.
|
||||||
|
|
||||||
|
- Some jurisdictions do not allow the exclusion of certain warranties or the limitation or exclusion of liability for certain types of damages. Therefore, some of the above limitations in this section may not apply to a user. In particular, nothing in these terms shall affect the statutory rights of any user or limit or exclude liability for death or physical injury arising from the negligence or wilful misconduct of Parity Technologies Limited or for fraud or fraudulent misrepresentation.
|
||||||
|
|
||||||
|
- All rights reserved by Parity Technologies Limited. Licensed to the public under the GPL v3: [https://www.gnu.org/licenses/gpl-3.0.txt](https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
# LEGAL WARNING LONG VERSION
|
||||||
|
|
||||||
|
The following Terms and Conditions ("Terms") govern the use of Parity Technologies Limited's open source software product ("Parity"). Prior to any use of Parity or any of Parity Technologies Limited's products ("Parity Technologies' Products"), you ("User" or "you") confirm on your own behalf and on behalf of anyone who uses Parity on your behalf that you (and they) understand, expressly agree to and will comply with all of the Terms. All capitalized words and expressions in these Terms will have the effect and meaning given to them in the Terms. The group of developers and other personnel that is now, or will be, employed by, or contracted with, or affiliated with, Parity Technologies Limited ("Parity Technologies" or "we") is termed the "Parity Technologies Team".
|
||||||
|
|
||||||
|
## Acknowledgement of Risks
|
||||||
|
|
||||||
|
The User acknowledges the following serious risks to any use of Parity and expressly agrees not to hold liable Parity Technologies or the Parity Technologies Team should any of these risks occur:
|
||||||
|
|
||||||
|
## Risk of Security Weaknesses in the Parity Core Infrastructure Software
|
||||||
|
|
||||||
|
Parity uses open-source libraries and components developed by third parties. While Parity Technologies Limited generally aims to use only widely adopted open-source technology and develop it in line with industry standards, such open-source technology may contain bugs and errors and may not function correctly in all circumstances. As a result, there is a risk that Parity Technologies or the Parity Technologies Team may have introduced unintentional weaknesses or bugs into the core infrastructural elements of Parity causing the system to lose Ethereum tokens ("ETH") stored in one or more User accounts or other accounts or lose sums of other valued tokens.
|
||||||
|
|
||||||
|
## Risk of Weaknesses or Exploitable Breakthroughs in the Field of Cryptography
|
||||||
|
|
||||||
|
Cryptography is an art, not a science, and the state of the art can advance over time. Advances in code cracking, or technical advances such as the development of quantum computers, could present risks to cryptocurrencies and Parity, which could result in the theft or loss of ETH. To the extent possible, Parity Technologies intends to update the protocol underlying Parity to account for any advances in cryptography and to incorporate additional security measures, but it cannot predict the future of cryptography or guarantee that any security updates will be made, timely or successful.
|
||||||
|
|
||||||
|
## Risk of Ether Mining Attacks
|
||||||
|
|
||||||
|
As with other cryptocurrencies, the blockchain accessed by Parity is susceptible to mining attacks, including but not limited to double-spend attacks, majority mining power attacks, "selfish-mining" attacks, and race condition attacks. Any successful attacks present a risk to the Ethereum ecosystem, expected proper execution and sequencing of ETH transactions, and expected proper execution and sequencing of contract computations. Despite the efforts of Parity Technologies and the Parity Technologies Team, known or novel mining attacks may be successful.
|
||||||
|
|
||||||
|
## Risk of Rapid Adoption and Insufficiency of Computational Application Processing Power on the Ethereum Network
|
||||||
|
|
||||||
|
If Ethereum is rapidly adopted, the demand for transaction processing and distributed application computations could rise dramatically and at a pace that exceeds the rate with which ETH miners can bring online additional mining power. Under such a scenario, the entire Ethereum ecosystem could become destabilized, due to the increased cost of running distributed applications. In turn, this could dampen interest in the Ethereum ecosystem and ETH. Insufficiency of computational resources and an associated rise in the price of ETH could result in businesses being unable to acquire scarce computational resources to run their distributed applications. This would represent revenue losses to businesses or, in the worst case, cause businesses to cease operations because such operations have become uneconomical due to distortions in the crypto-economy.
|
||||||
|
|
||||||
|
## Risk of Temporary Network Incoherence
|
||||||
|
|
||||||
|
We recommend that any groups handling large or important transactions maintain a voluntary 24-hour waiting period on any ETH deposited. If we become aware that the integrity of the network is at risk due to issues with Parity, we will endeavour to publish patches in a timely fashion to address the issues. We will endeavour to provide solutions within the voluntary 24-hour waiting period.
|
||||||
|
|
||||||
|
## Use of Parity by you
|
||||||
|
|
||||||
|
You agree to use Parity only for purposes that are permitted by (a) these Terms (including the Software Licence Terms, as defined below) and (b) any applicable law or regulation in the relevant jurisdictions (including any laws regarding the export of data or software to and from the United Kingdom or other relevant countries).
|
||||||
|
|
||||||
|
You agree that you will not engage in any activity that: (1) interferes with or disrupts Parity's or Parity Technologies' Products' functioning (or the networks which are connected to Parity), or (2) is calculated or likely to do so.
|
||||||
|
|
||||||
|
Unless you have been specifically permitted to do so in a separate agreement with Parity Technologies, you agree that you will not reproduce, duplicate, copy, sell, trade or resell Parity or Parity Technologies' Products for any purpose other than in accordance with the terms of the software licence terms available here: [https://www.gnu.org/licenses/gpl-3.0.txt](https://www.gnu.org/licenses/gpl-3.0.txt) ("Software Licence Terms").
|
||||||
|
|
||||||
|
You agree that you are solely responsible for (and that Parity Technologies has no responsibility to you or to any third party for) any breach of your obligations under these Terms and for the consequences (including any loss or damage which Parity Technologies may suffer) of any such breach.
|
||||||
|
|
||||||
|
## Privacy and your personal information
|
||||||
|
|
||||||
|
You agree to the use of your data (if any is gathered) in accordance with Parity Technologies' privacy policies: [https://parity.io/legal.html](https://parity.io/legal.html). This policy explains how Parity Technologies treats your personal information (if any is gathered), and protects your privacy, when you use Parity Technologies' Products.
|
||||||
|
|
||||||
|
## Content in Parity
|
||||||
|
|
||||||
|
You understand that all information and data (such as smart contracts, data files, written text, computer software, music, audio files or other sounds, photographs, videos or other images) which you may have access to as part of, or through your use of, Parity or Parity Technologies' Products are the sole responsibility of the person from which such content originated. All such information is referred to below as "Content".
|
||||||
|
|
||||||
|
You should be aware that Content presented to you through Parity or Parity Technologies' Products, including but not limited to any advertisements and any sponsored Content within Parity, may be protected by intellectual property rights which are owned by the possible sponsors or advertisers who may provide that Content to Parity Technologies (or by other persons or companies on their behalf). You may not modify, rent, lease, loan, sell, distribute or create derivative works based on this Content (either in whole or in part) unless you have been specifically told that you may do so by Parity Technologies or by the owners of that Content, in a separate agreement.
|
||||||
|
|
||||||
|
Parity Technologies reserves the right (but shall have no obligation) to pre-screen, review, flag, filter, modify, refuse or remove any or all Content presented.
|
||||||
|
|
||||||
|
You understand that by using Parity or Parity Technologies' Products you may be exposed to Content that you may find offensive, indecent or objectionable and that, in this additional respect, you use Parity or Parity Technologies' Products at your own risk.
|
||||||
|
|
||||||
|
You agree that you are solely responsible for (and that Parity Technologies has no responsibility to you or to any third party for) any Content that you create, transmit or display while using Parity or Parity Technologies' Products and for the consequences of your actions (including any loss or damage which Parity Technologies may suffer) by doing so.
|
||||||
|
|
||||||
|
## Proprietary rights
|
||||||
|
|
||||||
|
You acknowledge and agree that Parity Technologies owns all legal right, title and interest in and to Parity and Parity Technologies' Products, including any intellectual property rights which subsist in Parity and Parity Technologies' Products (whether those rights happen to be registered or not, and wherever in the world those rights may exist).
|
||||||
|
|
||||||
|
Unless you have agreed otherwise in writing with Parity Technologies, nothing in the Terms gives you a right to use any of Parity Technologies' trade names, trade marks, service marks, logos, domain names, and other distinctive brand features.
|
||||||
|
|
||||||
|
If you have been given an explicit right to use any of these brand features in a separate written agreement with Parity Technologies, then you agree that your use of such features shall be in compliance with that agreement, any applicable provisions of these Terms, and Parity Technologies' brand feature use guidelines as updated from time to time. These guidelines can be viewed online at https://parity.io/press.html.
|
||||||
|
|
||||||
|
You retain copyright and any other rights you already hold in Content which you submit, post or display on or through, Parity. By posting Content, you grant to Parity Technologies a perpetual, royalty-free, non-exclusive, and irrevocable right and licence to use such Content in connection with its provision of Parity and Parity Technologies' Products. Parity Technologies acknowledges and agrees that, except as expressly provided in these Terms or the Software Licence Terms, it obtains no right, title or interest from you (or your licensors) in or to any Content that you submit, post, transmit or display on, or through, Parity, including any intellectual property rights which subsist in that Content (whether those rights happen to be registered or not, and wherever in the world those rights may exist). Unless you have agreed otherwise in writing with Parity Technologies, you agree that you are responsible for protecting and enforcing those rights and that Parity Technologies has no obligation to do so on your behalf.
|
||||||
|
|
||||||
|
You agree that you shall not remove, obscure, or alter any proprietary rights notices (including copyright and trade mark notices) which may be affixed to or contained within Parity or Parity Technologies' Products.
|
||||||
|
|
||||||
|
Unless you have been expressly authorized to do so in writing by Parity Technologies, you agree that in using Parity, you will not use any trade mark, service mark, trade name, logo of any company or organization in a way that is likely or intended to cause confusion about the owner or authorized user of such marks, names or logos.
|
||||||
|
|
||||||
|
## Licence Restrictions from Parity Technologies
|
||||||
|
|
||||||
|
You may not (and you may not permit anyone else to) copy, modify, create a derivative work of, reverse engineer, decompile or otherwise attempt to extract the source code of Parity, Parity Technologies' Products or any part thereof, unless this is expressly permitted by our Software Licence Terms or required by law, or unless you have been specifically told that you may do so by Parity Technologies, in writing.
|
||||||
|
|
||||||
|
Unless Parity Technologies has given you specific written permission to do so, you may not assign (or grant a sub-licence of) your rights to use Parity Technologies' Products, grant a security interest in or over your rights to use Parity Technologies' Products, or otherwise transfer any part of your rights to use Parity Technologies' Products.
|
||||||
|
|
||||||
|
## Software updates
|
||||||
|
|
||||||
|
Parity may automatically download and install updates from time to time from Parity Technologies. These updates are designed to improve, enhance and further develop Parity and may take the form of bug fixes, enhanced functions, new software modules and completely new versions. You agree to receive such updates (and permit Parity Technologies to deliver these to you) as part of your use of Parity and Parity Technologies' Products.
|
||||||
|
|
||||||
|
## Ending your relationship with Parity Technologies
|
||||||
|
|
||||||
|
The Terms will continue to apply until terminated by either you or Parity Technologies as set out below. You may, at any time, terminate your legal agreement with Parity Technologies. Parity Technologies may, at any time, terminate its legal agreement with you if:
|
||||||
|
|
||||||
|
(A) you have breached any provision of these Terms (or have acted in a manner which indicates that you do not intend to, or are unable to, comply with the provisions of these Terms); or
|
||||||
|
|
||||||
|
(B) Parity Technologies is required to do so by law (for example, where the provision of Parity or any of Parity Technologies' Products to you is, or becomes, unlawful); or
|
||||||
|
|
||||||
|
(C) any partner with whom Parity Technologies offered products or services to you has terminated its relationship with Parity Technologies or ceased to offer products or services to you; or
|
||||||
|
|
||||||
|
(D) Parity Technologies is transitioning to no longer providing products or services to users in the country in which you are resident or from which you use the service; or
|
||||||
|
|
||||||
|
(E) the provision of products or services to you by Parity Technologies is, in Parity Technologies' opinion, no longer commercially viable.
|
||||||
|
|
||||||
|
When these Terms come to an end for any reason:
|
||||||
|
|
||||||
|
(A) you must immediately cease use of Parity and Parity Technologies' Products;
|
||||||
|
|
||||||
|
(B) you must delete or remove Parity or Parity Technologies' Products from all computer equipment in your possession or under your control; and
|
||||||
|
|
||||||
|
(C) all of the rights granted to you in these Terms shall cease.
|
||||||
|
|
||||||
|
Except as set out above, termination of these Terms shall not affect any rights, remedies, obligations and liabilities that have accrued to you or Parity Technologies up to the date of termination, including the right to claim damages in respect of any breach of these Terms which existed at or before the date of termination, and the courts of England and Wales shall continue to have jurisdiction in respect of such rights, obligations and liabilities indefinitely.
|
||||||
|
|
||||||
|
## Acknowledgement and acceptance of all risks, exclusion of warranties
|
||||||
|
|
||||||
|
The User expressly acknowledges and agrees that the User is using Parity and Parity Technologies' Products at the User's sole risk. The User represents that the User has an adequate understanding of the risks, usage and intricacies of cryptographic tokens and blockchain-based open source software.
|
||||||
|
|
||||||
|
You expressly understand and agree that Parity and Parity Technologies' Products are provided "as is" and "as available."
|
||||||
|
|
||||||
|
Parity Technologies, its subsidiaries and affiliates, and its licensors do not represent or warrant to you that:
|
||||||
|
|
||||||
|
(a) your use of Parity or Parity Technologies' Products will meet your requirements,
|
||||||
|
|
||||||
|
(b) your use of Parity or Parity Technologies' Products will be uninterrupted, timely, secure or free from error,
|
||||||
|
|
||||||
|
(c) any information obtained by you as a result of your use of Parity or Parity Technologies' Products will be accurate or reliable, or
|
||||||
|
|
||||||
|
(d) defects in the operation or functionality of any software provided to you as part of Parity Technologies' Products will be corrected.
|
||||||
|
|
||||||
|
Any material downloaded or otherwise obtained through the use of Parity or Parity Technologies' Products is done at your own discretion and risk and you will be solely responsible for any damage to your computer system or other device or loss of data or economic loss that results from the download of any such material.
|
||||||
|
|
||||||
|
No advice or information, whether oral or written, obtained by you from Parity Technologies or through or from Parity Technologies' Products shall create any warranty not expressly stated in the Terms.
|
||||||
|
|
||||||
|
To the fullest extent permitted by applicable law, Parity Technologies expressly disclaims all implied warranties and conditions of any kind including, but not limited to, implied statutory warranties and conditions of merchantability, fitness for a particular purpose and non-infringement.
|
||||||
|
|
||||||
|
## Exclusion and limitation of liability
|
||||||
|
|
||||||
|
The User acknowledges and agrees that, to the fullest extent permitted by any applicable law, the disclaimers and exclusion of liability contained herein apply to any and all damages or injury whatsoever caused by or related to risks of, use of, or inability to use, Parity under any cause of action whatsoever of any kind in any jurisdiction, including, without limitation, actions for breach of warranty, breach of contract or tort (including negligence).
|
||||||
|
|
||||||
|
Some jurisdictions do not allow the exclusion of certain warranties or the limitation or exclusion of liability for certain types of damages. Therefore, some of the above limitations in this section may not apply to you. In particular, nothing in these terms shall affect the statutory rights of any User or limit or exclude liability for death or physical injury arising from the negligence or wilful misconduct of Parity Technologies or for fraud or fraudulent misrepresentation.
|
||||||
|
|
||||||
|
Subject to any liability which may not be excluded, you expressly understand and agree that Parity Technologies, its subsidiaries and affiliates, and its licensors shall not be liable to you for:
|
||||||
|
|
||||||
|
(a) any direct, indirect, incidental, special, consequential or exemplary damages which may be incurred by you, however caused and under any theory of liability. This shall include, but not be limited to, any loss of profit (whether incurred directly or indirectly), any loss of goodwill or business reputation, any loss of data suffered, cost of procurement of substitute goods or services, or other intangible loss; or
|
||||||
|
|
||||||
|
(b) any loss or damage which may be incurred by you, including but not limited to loss or damage as a result of:
|
||||||
|
|
||||||
|
(i) any reliance placed by you on the completeness, accuracy or existence of any advertising, or as a result of any relationship or transaction between you and any advertiser or sponsor whose advertising appears on Parity Technologies' Products;
|
||||||
|
|
||||||
|
(ii) any changes which Parity Technologies may make to Parity Technologies' Products, or for any permanent or temporary cessation in the provision of Parity Technologies' Products (or any features within Parity Technologies' Products);
|
||||||
|
|
||||||
|
(iii) the deletion of, corruption of, or failure to store, any content and other communications data maintained or transmitted by or through your use of Parity Technologies' Products;
|
||||||
|
|
||||||
|
(iv) your failure to provide Parity Technologies with accurate account information (if this is required); and
|
||||||
|
|
||||||
|
(v) your failure to keep your password or account details secure and confidential.
|
||||||
|
|
||||||
|
The limitations on Parity Technologies' liability to you shall apply whether or not Parity Technologies has been advised of or should have been aware of the possibility of any such losses arising.
|
||||||
|
|
||||||
|
## Copyright and trade mark policies
|
||||||
|
|
||||||
|
It is Parity Technologies' policy to respond to notices of alleged copyright infringement that comply with applicable international intellectual property law (including, in the United States, the Digital Millennium Copyright Act) where Parity Technologies is put on notice and it is under Parity Technologies' control. In such cases Parity Technologies shall be entitled to terminate the accounts of repeat infringers.
|
||||||
|
|
||||||
|
## Possible Advertisements
|
||||||
|
|
||||||
|
Products or services provided may be supported by advertising revenue and may display advertisements and promotions. These advertisements may be targeted to the content of information stored on products or services, queries made through the products or services or other information.
|
||||||
|
|
||||||
|
The manner, mode and extent of advertising by Parity Technologies on Parity Technologies' Products are subject to change without specific notice to you.
|
||||||
|
|
||||||
|
You agree that Parity Technologies may place such advertising on products and services made available to you through Parity Technologies' products.
|
||||||
|
|
||||||
|
## Other content
|
||||||
|
|
||||||
|
Parity Technologies' services provided may include hyperlinks to other web sites, smart contracts or content or resources. Parity Technologies may have no control over any web sites or resources which are provided by companies or persons other than Parity Technologies.
|
||||||
|
|
||||||
|
You acknowledge and agree that Parity Technologies is not responsible for the availability of any such external sites or resources, and does not endorse any advertising, products or other materials on or available from such web sites or resources.
|
||||||
|
|
||||||
|
You acknowledge and agree that Parity Technologies is not liable for any loss or damage which may be incurred by you as a result of the availability of those external sites or resources, or as a result of any reliance placed by you on the completeness, accuracy or existence of any advertising, products or other materials on, or available from, such web sites or resources.
|
||||||
|
|
||||||
|
## Changes to the Terms
|
||||||
|
|
||||||
|
Parity Technologies may make changes to these Terms from time to time. When these changes are made, Parity Technologies will make a new copy of these Terms available at [https://parity.io/legal.html](https://parity.io/legal.html) and any new terms will be made available to you from within, or through, Parity or the affected Parity Technologies' Product.
|
||||||
|
|
||||||
|
You understand and agree that if you use Parity or Parity Technologies' Products after the date on which the Terms have changed, Parity Technologies will treat your use as acceptance of the updated terms.
|
||||||
|
|
||||||
|
## General legal terms
|
||||||
|
|
||||||
|
Sometimes when you use Parity or Parity Technologies' Products, you may (as a result of, or in connection with your use of these products) use a service or download a piece of software, or smart contract, or purchase goods, which are provided by another person or company. Your use of these other services, software, smart contract or goods may be subject to separate terms between you and the company or person concerned. If so, these Terms do not affect your legal relationship with these other companies or individuals.
|
||||||
|
|
||||||
|
These Terms (including the Software Licence Terms) constitute the whole legal agreement between you and Parity Technologies and govern your use of Parity and Parity Technologies' Products (but excluding any products or services which Parity Technologies may provide to you under a separate written agreement), and completely replace any prior agreements between you and Parity Technologies in relation to Parity and Parity Technologies' Products.
|
||||||
|
|
||||||
|
You agree that Parity Technologies may provide you with notices, including those regarding changes to the Terms, by postings on Parity or the affected Parity Technologies' Product.
|
||||||
|
|
||||||
|
You agree that if Parity Technologies does not exercise or enforce any legal right or remedy which is contained in these Terms (or which Parity Technologies has the benefit of under any applicable law), this will not be taken to be a formal waiver of Parity Technologies' rights and that those rights or remedies will still be available to Parity Technologies.
|
||||||
|
|
||||||
|
If any court of law, having the jurisdiction to decide on this matter, rules that any provision of these Terms is invalid, then that provision will be removed from the Terms without affecting the rest of the Terms. The remaining provisions of the Terms will continue to be valid and enforceable.
|
||||||
|
|
||||||
|
You acknowledge and agree that each member of the group of companies of which Parity Technologies is the parent shall be third party beneficiaries to these Terms and that such other companies shall be entitled to directly enforce, and rely upon, any provision of the Terms which confers a benefit on (or rights in favour of) them. Other than this, no other person or company shall be third party beneficiaries to these Terms.
|
||||||
|
|
||||||
|
These Terms, and your relationship with Parity Technologies under these Terms, shall be governed by the laws of England and Wales, United Kingdom. You and Parity Technologies agree to submit to the exclusive jurisdiction of the courts located within England, United Kingdom to resolve any legal matter arising from these Terms (subject to the Dispute Resolution clause below). Notwithstanding this, you agree that Parity Technologies shall still be allowed to apply for injunctive remedies (or an equivalent type of urgent legal relief) in any jurisdiction.
|
||||||
|
|
||||||
|
## Dispute Resolution
|
||||||
|
|
||||||
|
All disputes or claims arising out of, relating to, or in connection with the Terms, the breach thereof, or use of Parity shall be finally settled under the Rules of Arbitration of the International Chamber of Commerce by one or more arbitrators appointed in accordance with said Rules. All claims between the parties relating to these Terms that are capable of being resolved by arbitration, whether sounding in contract, tort, or otherwise, shall be submitted to ICC arbitration. Prior to commencing arbitration, the parties have a duty to negotiate in good faith and attempt to resolve their dispute in a manner other than by submission to ICC arbitration. The arbitration panel shall consist of one arbitrator only, unless the ICC Court of Arbitration determines that the dispute is such as to warrant three arbitrators. If the Court determines that one arbitrator is sufficient, then such arbitrator shall be a UK resident. If the Court determines that three arbitrators are necessary, then each party shall have 30 days to nominate an arbitrator of its choice - in the case of the Claimant, measured from receipt of notification of the ICC Court's decision to have three arbitrators; in the case of Respondent, measured from receipt of notification of Claimant's nomination. All nominations must be UK residents. If a party fails to nominate an arbitrator, the Court will do so. The Court shall also appoint the chairman. All arbitrators shall be and remain "independent" of the parties involved in the arbitration. The place of arbitration shall be England, United Kingdom. The language of the arbitration shall be English. In deciding the merits of the dispute, the tribunal shall apply the laws of England and Wales and any discovery shall be limited and shall not involve any depositions or any other examinations outside of a formal hearing. The tribunal shall not assume the powers of amiable compositeur or decide the case ex aequo et bono. In the final award, the tribunal shall fix the costs of the arbitration and decide which of the parties shall bear such costs in what proportion. Every award shall be binding on the parties. The parties undertake to carry out the award without delay and waive their right to any form of recourse against the award in so far as such waiver can validly be made. Notwithstanding the foregoing, either party shall be entitled at any time to apply to the courts of England, United Kingdom for injunctive relief, or where a claim is incapable of being resolved by arbitration.
|
||||||
|
|
||||||
|
## Additional Terms for Enterprise Use
|
||||||
|
|
||||||
|
If you are a business entity, then the individual accepting on behalf of the entity (for the avoidance of doubt, for business entities, in these Terms, "you" means the entity) represents and warrants that he or she has the authority to act on your behalf, that you represent that you are duly authorized to do business in the country or countries where you operate, and that your employees, officers, representatives, and other agents accessing Parity Technologies' Products are duly authorized to access Parity and to legally bind you to these Terms.
|
||||||
|
|
||||||
|
Subject to these Terms and subject to the Software Licence Terms, Parity Technologies grants you a non-exclusive, non-transferable licence to use Parity solely on machines intended for use by your employees, officers, representatives, and agents in connection with your business entity, and provided that their use of Parity will be subject to these Terms (including the Software Licence Terms).
|
@ -44,4 +44,7 @@
|
|||||||
.accept {
|
.accept {
|
||||||
margin: 1.5em 0;
|
margin: 1.5em 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.markdown {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -80,7 +80,7 @@ class SyncWarning extends Component {
|
|||||||
/>
|
/>
|
||||||
}
|
}
|
||||||
checked={ dontShowAgain }
|
checked={ dontShowAgain }
|
||||||
onCheck={ this.handleCheck }
|
onClick={ this.handleCheck }
|
||||||
/>
|
/>
|
||||||
<Button
|
<Button
|
||||||
label={
|
label={
|
||||||
|
@ -99,7 +99,19 @@ module.exports = {
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
test: /\.md$/,
|
||||||
|
use: [
|
||||||
|
{
|
||||||
|
loader: 'html-loader',
|
||||||
|
options: {}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
loader: 'markdown-loader',
|
||||||
|
options: {}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
test: /\.css$/,
|
test: /\.css$/,
|
||||||
include: [ /src/ ],
|
include: [ /src/ ],
|
||||||
|
@ -71,7 +71,7 @@ pub fn execute(cmd: AccountCmd) -> Result<String, String> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn keys_dir(path: String, spec: SpecType) -> Result<RootDiskDirectory, String> {
|
fn keys_dir(path: String, spec: SpecType) -> Result<RootDiskDirectory, String> {
|
||||||
let spec = spec.spec()?;
|
let spec = spec.spec(&::std::env::temp_dir())?;
|
||||||
let mut path = PathBuf::from(&path);
|
let mut path = PathBuf::from(&path);
|
||||||
path.push(spec.data_dir);
|
path.push(spec.data_dir);
|
||||||
RootDiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e))
|
RootDiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e))
|
||||||
|
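The pattern in the hunk above is that loading a chain spec now requires a cache directory, and `keys_dir` passes a throwaway temp dir because it only needs the spec's `data_dir` name. A hedged, self-contained sketch of that idea; `Spec` and `load_spec` are simplified stand-ins, not Parity's actual types:

```rust
use std::env;
use std::path::{Path, PathBuf};

// Stand-in for the spec type; only the field keys_dir needs is modelled.
struct Spec {
    data_dir: String,
}

// Assumed shape: loading a spec now takes a directory for cached data.
fn load_spec(_cache_dir: &Path) -> Result<Spec, String> {
    Ok(Spec { data_dir: "ethereum".into() })
}

// keys_dir only needs spec.data_dir, so a throwaway temp dir is an
// acceptable cache location here.
fn keys_dir(base: &str) -> Result<PathBuf, String> {
    let spec = load_spec(&env::temp_dir())?;
    let mut path = PathBuf::from(base);
    path.push(spec.data_dir);
    Ok(path)
}

fn main() {
    println!("{:?}", keys_dir("/home/user/.parity/keys"));
}
```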
@ -29,7 +29,7 @@ use ethcore::error::ImportError;
|
|||||||
use ethcore::miner::Miner;
|
use ethcore::miner::Miner;
|
||||||
use ethcore::verification::queue::VerifierSettings;
|
use ethcore::verification::queue::VerifierSettings;
|
||||||
use cache::CacheConfig;
|
use cache::CacheConfig;
|
||||||
use informant::{Informant, MillisecondDuration};
|
use informant::{Informant, FullNodeInformantData, MillisecondDuration};
|
||||||
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
|
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
|
||||||
use helpers::{to_client_config, execute_upgrades};
|
use helpers::{to_client_config, execute_upgrades};
|
||||||
use dir::Directories;
|
use dir::Directories;
|
||||||
@ -148,7 +148,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
|
|||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
|
|
||||||
// load spec file
|
// load spec file
|
||||||
let spec = cmd.spec.spec()?;
|
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
|
||||||
|
|
||||||
// load genesis hash
|
// load genesis hash
|
||||||
let genesis_hash = spec.genesis_header().hash();
|
let genesis_hash = spec.genesis_header().hash();
|
||||||
@ -238,7 +238,17 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let informant = Arc::new(Informant::new(client.clone(), None, None, None, None, cmd.with_color));
|
let informant = Arc::new(Informant::new(
|
||||||
|
FullNodeInformantData {
|
||||||
|
client: client.clone(),
|
||||||
|
sync: None,
|
||||||
|
net: None,
|
||||||
|
},
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
cmd.with_color,
|
||||||
|
));
|
||||||
|
|
||||||
service.register_io_handler(informant).map_err(|_| "Unable to register informant handler".to_owned())?;
|
service.register_io_handler(informant).map_err(|_| "Unable to register informant handler".to_owned())?;
|
||||||
|
|
||||||
let do_import = |bytes| {
|
let do_import = |bytes| {
|
||||||
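The informant construction above switches from a list of optional handles to a single data struct. A minimal sketch of that grouping pattern, using simplified stand-in types rather than the real client, sync and network handles:

```rust
use std::sync::Arc;

// Simplified stand-ins for the real handles (assumptions, not Parity types).
struct Client;
struct SyncHandle;
struct NetHandle;

// The grouping pattern: bundle the full-node data sources into one value
// instead of passing several Options positionally.
struct FullNodeInformantData {
    client: Arc<Client>,
    sync: Option<Arc<SyncHandle>>,
    net: Option<Arc<NetHandle>>,
}

struct Informant {
    data: FullNodeInformantData,
    with_color: bool,
}

impl Informant {
    fn new(data: FullNodeInformantData, with_color: bool) -> Self {
        Informant { data, with_color }
    }
}

fn main() {
    let informant = Informant::new(
        FullNodeInformantData { client: Arc::new(Client), sync: None, net: None },
        true,
    );
    println!(
        "client handles: {}, sync: {}, net: {}, color: {}",
        Arc::strong_count(&informant.data.client),
        informant.data.sync.is_some(),
        informant.data.net.is_some(),
        informant.with_color
    );
}
```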
@ -320,7 +330,7 @@ fn start_client(
|
|||||||
) -> Result<ClientService, String> {
|
) -> Result<ClientService, String> {
|
||||||
|
|
||||||
// load spec file
|
// load spec file
|
||||||
let spec = spec.spec()?;
|
let spec = spec.spec(&dirs.cache)?;
|
||||||
|
|
||||||
// load genesis hash
|
// load genesis hash
|
||||||
let genesis_hash = spec.genesis_header().hash();
|
let genesis_hash = spec.genesis_header().hash();
|
||||||
@ -517,7 +527,7 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn kill_db(cmd: KillBlockchain) -> Result<(), String> {
|
pub fn kill_db(cmd: KillBlockchain) -> Result<(), String> {
|
||||||
let spec = cmd.spec.spec()?;
|
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
|
||||||
let genesis_hash = spec.genesis_header().hash();
|
let genesis_hash = spec.genesis_header().hash();
|
||||||
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir);
|
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir);
|
||||||
let user_defaults_path = db_dirs.user_defaults_path();
|
let user_defaults_path = db_dirs.user_defaults_path();
|
||||||
|
@ -250,6 +250,8 @@ usage! {
|
|||||||
or |c: &Config| otry!(c.mining).force_sealing.clone(),
|
or |c: &Config| otry!(c.mining).force_sealing.clone(),
|
||||||
flag_reseal_on_txs: String = "own",
|
flag_reseal_on_txs: String = "own",
|
||||||
or |c: &Config| otry!(c.mining).reseal_on_txs.clone(),
|
or |c: &Config| otry!(c.mining).reseal_on_txs.clone(),
|
||||||
|
flag_reseal_on_uncle: bool = false,
|
||||||
|
or |c: &Config| otry!(c.mining).reseal_on_uncle.clone(),
|
||||||
flag_reseal_min_period: u64 = 2000u64,
|
flag_reseal_min_period: u64 = 2000u64,
|
||||||
or |c: &Config| otry!(c.mining).reseal_min_period.clone(),
|
or |c: &Config| otry!(c.mining).reseal_min_period.clone(),
|
||||||
flag_reseal_max_period: u64 = 120000u64,
|
flag_reseal_max_period: u64 = 120000u64,
|
||||||
@ -524,6 +526,7 @@ struct Mining {
|
|||||||
author: Option<String>,
|
author: Option<String>,
|
||||||
engine_signer: Option<String>,
|
engine_signer: Option<String>,
|
||||||
force_sealing: Option<bool>,
|
force_sealing: Option<bool>,
|
||||||
|
reseal_on_uncle: Option<bool>,
|
||||||
reseal_on_txs: Option<String>,
|
reseal_on_txs: Option<String>,
|
||||||
reseal_min_period: Option<u64>,
|
reseal_min_period: Option<u64>,
|
||||||
reseal_max_period: Option<u64>,
|
reseal_max_period: Option<u64>,
|
||||||
@ -788,6 +791,7 @@ mod tests {
|
|||||||
flag_reseal_on_txs: "all".into(),
|
flag_reseal_on_txs: "all".into(),
|
||||||
flag_reseal_min_period: 4000u64,
|
flag_reseal_min_period: 4000u64,
|
||||||
flag_reseal_max_period: 60000u64,
|
flag_reseal_max_period: 60000u64,
|
||||||
|
flag_reseal_on_uncle: false,
|
||||||
flag_work_queue_size: 20usize,
|
flag_work_queue_size: 20usize,
|
||||||
flag_tx_gas_limit: Some("6283184".into()),
|
flag_tx_gas_limit: Some("6283184".into()),
|
||||||
flag_tx_time_limit: Some(100u64),
|
flag_tx_time_limit: Some(100u64),
|
||||||
@ -1012,6 +1016,7 @@ mod tests {
|
|||||||
engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()),
|
engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()),
|
||||||
force_sealing: Some(true),
|
force_sealing: Some(true),
|
||||||
reseal_on_txs: Some("all".into()),
|
reseal_on_txs: Some("all".into()),
|
||||||
|
reseal_on_uncle: None,
|
||||||
reseal_min_period: Some(4000),
|
reseal_min_period: Some(4000),
|
||||||
reseal_max_period: Some(60000),
|
reseal_max_period: Some(60000),
|
||||||
work_queue_size: None,
|
work_queue_size: None,
|
||||||
|
@ -262,6 +262,9 @@ Sealing/Mining Options:
|
|||||||
ext - reseal only on a new external transaction;
|
ext - reseal only on a new external transaction;
|
||||||
all - reseal on all new transactions
|
all - reseal on all new transactions
|
||||||
(default: {flag_reseal_on_txs}).
|
(default: {flag_reseal_on_txs}).
|
||||||
|
--reseal-on-uncle Force the node to author new blocks when a new uncle
|
||||||
|
block is imported.
|
||||||
|
(default: {flag_reseal_on_uncle})
|
||||||
--reseal-min-period MS Specify the minimum time between reseals from
|
--reseal-min-period MS Specify the minimum time between reseals from
|
||||||
incoming transactions. MS is time measured in
|
incoming transactions. MS is time measured in
|
||||||
milliseconds (default: {flag_reseal_min_period}).
|
milliseconds (default: {flag_reseal_min_period}).
|
||||||
|
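The new `--reseal-on-uncle` option is a plain boolean that the miner consults when an uncle block is imported. A hedged sketch of that gating; the names and types here are illustrative, not Parity's miner API:

```rust
// Illustrative subset of the miner options touched by this change
// (assumed shape, not the real MinerOptions struct).
struct MinerOptions {
    reseal_on_uncle: bool,
}

// Called when a new uncle block is imported: only force a reseal if the
// operator opted in via --reseal-on-uncle.
fn on_uncle_imported<F: FnMut()>(opts: &MinerOptions, mut reseal: F) {
    if opts.reseal_on_uncle {
        reseal();
    }
}

fn main() {
    let opts = MinerOptions { reseal_on_uncle: true };
    let mut reseals = 0;
    on_uncle_imported(&opts, || reseals += 1);
    assert_eq!(reseals, 1);
}
```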
@ -34,7 +34,7 @@ use rpc::{IpcConfiguration, HttpConfiguration, WsConfiguration, UiConfiguration}
|
|||||||
use rpc_apis::ApiSet;
|
use rpc_apis::ApiSet;
|
||||||
use parity_rpc::NetworkSettings;
|
use parity_rpc::NetworkSettings;
|
||||||
use cache::CacheConfig;
|
use cache::CacheConfig;
|
||||||
use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_for_db,
|
use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_and_local,
|
||||||
geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address, to_gas_limit, to_queue_strategy};
|
geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address, to_gas_limit, to_queue_strategy};
|
||||||
use params::{SpecType, ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, Pruning, Switch};
|
use params::{SpecType, ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, Pruning, Switch};
|
||||||
use ethcore_logger::Config as LogConfig;
|
use ethcore_logger::Config as LogConfig;
|
||||||
@ -526,6 +526,7 @@ impl Configuration {
|
|||||||
force_sealing: self.args.flag_force_sealing,
|
force_sealing: self.args.flag_force_sealing,
|
||||||
reseal_on_external_tx: reseal.external,
|
reseal_on_external_tx: reseal.external,
|
||||||
reseal_on_own_tx: reseal.own,
|
reseal_on_own_tx: reseal.own,
|
||||||
|
reseal_on_uncle: self.args.flag_reseal_on_uncle,
|
||||||
tx_gas_limit: match self.args.flag_tx_gas_limit {
|
tx_gas_limit: match self.args.flag_tx_gas_limit {
|
||||||
Some(ref d) => to_u256(d)?,
|
Some(ref d) => to_u256(d)?,
|
||||||
None => U256::max_value(),
|
None => U256::max_value(),
|
||||||
@ -662,7 +663,7 @@ impl Configuration {
|
|||||||
let mut buffer = String::new();
|
let mut buffer = String::new();
|
||||||
let mut node_file = File::open(path).map_err(|e| format!("Error opening reserved nodes file: {}", e))?;
|
let mut node_file = File::open(path).map_err(|e| format!("Error opening reserved nodes file: {}", e))?;
|
||||||
node_file.read_to_string(&mut buffer).map_err(|_| "Error reading reserved node file")?;
|
node_file.read_to_string(&mut buffer).map_err(|_| "Error reading reserved node file")?;
|
||||||
let lines = buffer.lines().map(|s| s.trim().to_owned()).filter(|s| !s.is_empty()).collect::<Vec<_>>();
|
let lines = buffer.lines().map(|s| s.trim().to_owned()).filter(|s| !s.is_empty() && !s.starts_with("#")).collect::<Vec<_>>();
|
||||||
if let Some(invalid) = lines.iter().find(|s| !is_valid_node_url(s)) {
|
if let Some(invalid) = lines.iter().find(|s| !is_valid_node_url(s)) {
|
||||||
return Err(format!("Invalid node address format given for a boot node: {}", invalid));
|
return Err(format!("Invalid node address format given for a boot node: {}", invalid));
|
||||||
}
|
}
|
||||||
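The only behavioural change in this hunk is that blank lines and `#` comments are now skipped when reading the reserved-peers file (exercised by the new test further down). A self-contained sketch of that parsing step; the node-URL check is a placeholder, not Parity's `is_valid_node_url`:

```rust
use std::fs;

// Placeholder for Parity's is_valid_node_url; only the shape matters here.
fn looks_like_enode(s: &str) -> bool {
    s.starts_with("enode://")
}

// Read a reserved-peers file, dropping blank lines and `#` comments,
// mirroring the filter added above.
fn read_reserved_peers(path: &str) -> Result<Vec<String>, String> {
    let buffer = fs::read_to_string(path)
        .map_err(|e| format!("Error opening reserved nodes file: {}", e))?;
    let lines: Vec<String> = buffer
        .lines()
        .map(|s| s.trim().to_owned())
        .filter(|s| !s.is_empty() && !s.starts_with('#'))
        .collect();
    if let Some(invalid) = lines.iter().find(|s| !looks_like_enode(s)) {
        return Err(format!("Invalid node address format given for a boot node: {}", invalid));
    }
    Ok(lines)
}

fn main() {
    match read_reserved_peers("reserved_peers.txt") {
        Ok(peers) => println!("{} reserved peer(s)", peers.len()),
        Err(e) => eprintln!("{}", e),
    }
}
```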
@ -893,14 +894,20 @@ impl Configuration {
|
|||||||
let local_path = default_local_path();
|
let local_path = default_local_path();
|
||||||
let base_path = self.args.flag_base_path.as_ref().or_else(|| self.args.flag_datadir.as_ref()).map_or_else(|| default_data_path(), |s| s.clone());
|
let base_path = self.args.flag_base_path.as_ref().or_else(|| self.args.flag_datadir.as_ref()).map_or_else(|| default_data_path(), |s| s.clone());
|
||||||
let data_path = replace_home("", &base_path);
|
let data_path = replace_home("", &base_path);
|
||||||
let base_db_path = if self.args.flag_base_path.is_some() && self.args.flag_db_path.is_none() {
|
let is_using_base_path = self.args.flag_base_path.is_some();
|
||||||
// If base_path is set and db_path is not we default to base path subdir instead of LOCAL.
|
// If base_path is set and db_path is not we default to base path subdir instead of LOCAL.
|
||||||
|
let base_db_path = if is_using_base_path && self.args.flag_db_path.is_none() {
|
||||||
"$BASE/chains"
|
"$BASE/chains"
|
||||||
} else {
|
} else {
|
||||||
self.args.flag_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s)
|
self.args.flag_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s)
|
||||||
};
|
};
|
||||||
|
let cache_path = if is_using_base_path {
|
||||||
|
"$BASE/cache".into()
|
||||||
|
} else {
|
||||||
|
replace_home_and_local(&data_path, &local_path, &dir::CACHE_PATH)
|
||||||
|
};
|
||||||
|
|
||||||
let db_path = replace_home_for_db(&data_path, &local_path, &base_db_path);
|
let db_path = replace_home_and_local(&data_path, &local_path, &base_db_path);
|
||||||
let keys_path = replace_home(&data_path, &self.args.flag_keys_path);
|
let keys_path = replace_home(&data_path, &self.args.flag_keys_path);
|
||||||
let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path);
|
let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path);
|
||||||
let secretstore_path = replace_home(&data_path, &self.args.flag_secretstore_path);
|
let secretstore_path = replace_home(&data_path, &self.args.flag_secretstore_path);
|
||||||
@ -924,6 +931,7 @@ impl Configuration {
|
|||||||
Directories {
|
Directories {
|
||||||
keys: keys_path,
|
keys: keys_path,
|
||||||
base: data_path,
|
base: data_path,
|
||||||
|
cache: cache_path,
|
||||||
db: db_path,
|
db: db_path,
|
||||||
dapps: dapps_path,
|
dapps: dapps_path,
|
||||||
signer: ui_path,
|
signer: ui_path,
|
||||||
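Net effect of the directories change: when `--base-path` is given the cache is kept next to the chain data under `$BASE/cache`, otherwise the new platform default from `dir.rs` is used (`$LOCAL/cache` on Windows, `$BASE/cache` elsewhere). A condensed sketch of that choice, with the placeholder expansion step left out:

```rust
// Platform default mirroring the new dir::CACHE_PATH constant.
#[cfg(target_os = "windows")]
const CACHE_PATH: &str = "$LOCAL/cache";
#[cfg(not(target_os = "windows"))]
const CACHE_PATH: &str = "$BASE/cache";

// Condensed sketch of the cache-directory choice: keep the cache under the
// configured base path when one is given, otherwise use the platform
// default. The returned pattern still needs $BASE/$LOCAL expansion.
fn cache_path_pattern(base_path_given: bool) -> &'static str {
    if base_path_given { "$BASE/cache" } else { CACHE_PATH }
}

fn main() {
    println!("with --base-path: {}", cache_path_pattern(true));
    println!("default:          {}", cache_path_pattern(false));
}
```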
@ -1554,6 +1562,19 @@ mod tests {
|
|||||||
assert!(conf.init_reserved_nodes().is_ok());
|
assert!(conf.init_reserved_nodes().is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_ignore_comments_in_reserved_peers() {
|
||||||
|
let temp = RandomTempPath::new();
|
||||||
|
create_dir(temp.as_str().to_owned()).unwrap();
|
||||||
|
let filename = temp.as_str().to_owned() + "/peers_comments";
|
||||||
|
File::create(filename.clone()).unwrap().write_all(b"# Sample comment\nenode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@172.0.0.1:30303\n").unwrap();
|
||||||
|
let args = vec!["parity", "--reserved-peers", &filename];
|
||||||
|
let conf = Configuration::parse(&args, None).unwrap();
|
||||||
|
let reserved_nodes = conf.init_reserved_nodes();
|
||||||
|
assert!(reserved_nodes.is_ok());
|
||||||
|
assert_eq!(reserved_nodes.unwrap().len(), 1);
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_dev_chain() {
|
fn test_dev_chain() {
|
||||||
let args = vec!["parity", "--chain", "dev"];
|
let args = vec!["parity", "--chain", "dev"];
|
||||||
|
@ -18,7 +18,7 @@ use std::fs;
|
|||||||
use std::path::{PathBuf, Path};
|
use std::path::{PathBuf, Path};
|
||||||
use util::{H64, H256};
|
use util::{H64, H256};
|
||||||
use util::journaldb::Algorithm;
|
use util::journaldb::Algorithm;
|
||||||
use helpers::{replace_home, replace_home_for_db};
|
use helpers::{replace_home, replace_home_and_local};
|
||||||
use app_dirs::{AppInfo, get_app_root, AppDataType};
|
use app_dirs::{AppInfo, get_app_root, AppDataType};
|
||||||
|
|
||||||
#[cfg(target_os = "macos")] const AUTHOR: &'static str = "Parity";
|
#[cfg(target_os = "macos")] const AUTHOR: &'static str = "Parity";
|
||||||
@ -34,6 +34,9 @@ use app_dirs::{AppInfo, get_app_root, AppDataType};
|
|||||||
#[cfg(target_os = "windows")] pub const CHAINS_PATH: &'static str = "$LOCAL/chains";
|
#[cfg(target_os = "windows")] pub const CHAINS_PATH: &'static str = "$LOCAL/chains";
|
||||||
#[cfg(not(target_os = "windows"))] pub const CHAINS_PATH: &'static str = "$BASE/chains";
|
#[cfg(not(target_os = "windows"))] pub const CHAINS_PATH: &'static str = "$BASE/chains";
|
||||||
|
|
||||||
|
#[cfg(target_os = "windows")] pub const CACHE_PATH: &'static str = "$LOCAL/cache";
|
||||||
|
#[cfg(not(target_os = "windows"))] pub const CACHE_PATH: &'static str = "$BASE/cache";
|
||||||
|
|
||||||
// this const is irrelevant because we have migrations now,
|
// this const is irrelevant because we have migrations now,
|
||||||
// but we still use it for backwards compatibility
|
// but we still use it for backwards compatibility
|
||||||
const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
|
const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
|
||||||
@ -42,6 +45,7 @@ const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
|
|||||||
pub struct Directories {
|
pub struct Directories {
|
||||||
pub base: String,
|
pub base: String,
|
||||||
pub db: String,
|
pub db: String,
|
||||||
|
pub cache: String,
|
||||||
pub keys: String,
|
pub keys: String,
|
||||||
pub signer: String,
|
pub signer: String,
|
||||||
pub dapps: String,
|
pub dapps: String,
|
||||||
@ -54,7 +58,8 @@ impl Default for Directories {
|
|||||||
let local_dir = default_local_path();
|
let local_dir = default_local_path();
|
||||||
Directories {
|
Directories {
|
||||||
base: replace_home(&data_dir, "$BASE"),
|
base: replace_home(&data_dir, "$BASE"),
|
||||||
db: replace_home_for_db(&data_dir, &local_dir, CHAINS_PATH),
|
db: replace_home_and_local(&data_dir, &local_dir, CHAINS_PATH),
|
||||||
|
cache: replace_home_and_local(&data_dir, &local_dir, CACHE_PATH),
|
||||||
keys: replace_home(&data_dir, "$BASE/keys"),
|
keys: replace_home(&data_dir, "$BASE/keys"),
|
||||||
signer: replace_home(&data_dir, "$BASE/signer"),
|
signer: replace_home(&data_dir, "$BASE/signer"),
|
||||||
dapps: replace_home(&data_dir, "$BASE/dapps"),
|
dapps: replace_home(&data_dir, "$BASE/dapps"),
|
||||||
@ -67,6 +72,7 @@ impl Directories {
|
|||||||
pub fn create_dirs(&self, dapps_enabled: bool, signer_enabled: bool, secretstore_enabled: bool) -> Result<(), String> {
|
pub fn create_dirs(&self, dapps_enabled: bool, signer_enabled: bool, secretstore_enabled: bool) -> Result<(), String> {
|
||||||
fs::create_dir_all(&self.base).map_err(|e| e.to_string())?;
|
fs::create_dir_all(&self.base).map_err(|e| e.to_string())?;
|
||||||
fs::create_dir_all(&self.db).map_err(|e| e.to_string())?;
|
fs::create_dir_all(&self.db).map_err(|e| e.to_string())?;
|
||||||
|
fs::create_dir_all(&self.cache).map_err(|e| e.to_string())?;
|
||||||
fs::create_dir_all(&self.keys).map_err(|e| e.to_string())?;
|
fs::create_dir_all(&self.keys).map_err(|e| e.to_string())?;
|
||||||
if signer_enabled {
|
if signer_enabled {
|
||||||
fs::create_dir_all(&self.signer).map_err(|e| e.to_string())?;
|
fs::create_dir_all(&self.signer).map_err(|e| e.to_string())?;
|
||||||
@ -231,7 +237,7 @@ pub fn default_hypervisor_path() -> String {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::Directories;
|
use super::Directories;
|
||||||
use helpers::{replace_home, replace_home_for_db};
|
use helpers::{replace_home, replace_home_and_local};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_default_directories() {
|
fn test_default_directories() {
|
||||||
@ -239,10 +245,14 @@ mod tests {
|
|||||||
let local_dir = super::default_local_path();
|
let local_dir = super::default_local_path();
|
||||||
let expected = Directories {
|
let expected = Directories {
|
||||||
base: replace_home(&data_dir, "$BASE"),
|
base: replace_home(&data_dir, "$BASE"),
|
||||||
db: replace_home_for_db(&data_dir, &local_dir,
|
db: replace_home_and_local(&data_dir, &local_dir,
|
||||||
if cfg!(target_os = "windows") { "$LOCAL/chains" }
|
if cfg!(target_os = "windows") { "$LOCAL/chains" }
|
||||||
else { "$BASE/chains" }
|
else { "$BASE/chains" }
|
||||||
),
|
),
|
||||||
|
cache: replace_home_and_local(&data_dir, &local_dir,
|
||||||
|
if cfg!(target_os = "windows") { "$LOCAL/cache" }
|
||||||
|
else { "$BASE/cache" }
|
||||||
|
),
|
||||||
keys: replace_home(&data_dir, "$BASE/keys"),
|
keys: replace_home(&data_dir, "$BASE/keys"),
|
||||||
signer: replace_home(&data_dir, "$BASE/signer"),
|
signer: replace_home(&data_dir, "$BASE/signer"),
|
||||||
dapps: replace_home(&data_dir, "$BASE/dapps"),
|
dapps: replace_home(&data_dir, "$BASE/dapps"),
|
||||||
|
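Note: the hunks above wire a dedicated cache directory through Directories — a platform-conditional CACHE_PATH template, a new cache field, and creation of the directory in create_dirs. As a minimal illustration (plain std Rust, not the parity sources) of how the template is chosen before the $BASE/$LOCAL placeholders are expanded:

	// Illustration only: the constants above use #[cfg] attributes; cfg! gives
	// the same per-platform selection in expression position.
	fn cache_path_template() -> &'static str {
		if cfg!(target_os = "windows") { "$LOCAL/cache" } else { "$BASE/cache" }
	}

	fn main() {
		// Prints "$LOCAL/cache" on Windows and "$BASE/cache" elsewhere.
		println!("{}", cache_path_template());
	}
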
@@ -140,7 +140,7 @@ pub fn replace_home(base: &str, arg: &str) -> String {
 	r.replace("/", &::std::path::MAIN_SEPARATOR.to_string())
 }
 
-pub fn replace_home_for_db(base: &str, local: &str, arg: &str) -> String {
+pub fn replace_home_and_local(base: &str, local: &str, arg: &str) -> String {
 	let r = replace_home(base, arg);
 	r.replace("$LOCAL", local)
 }
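The rename from replace_home_for_db to replace_home_and_local reflects that the helper now expands $LOCAL for any path template (chains and the new cache), not only the database. A self-contained sketch of the same two-step substitution, with a simplified stand-in for replace_home (the real helper also expands the home directory and normalizes separators):

	// Hypothetical stand-ins for illustration; only the substitution order matters.
	fn replace_base(base: &str, arg: &str) -> String {
		arg.replace("$BASE", base)
	}

	fn replace_base_and_local(base: &str, local: &str, arg: &str) -> String {
		// same shape as replace_home_and_local above: expand $BASE first, then $LOCAL
		replace_base(base, arg).replace("$LOCAL", local)
	}

	fn main() {
		let base = "/home/user/.local/share/io.parity.ethereum";
		let local = "/home/user/.cache/io.parity.ethereum";
		assert_eq!(replace_base_and_local(base, local, "$BASE/keys"),
			"/home/user/.local/share/io.parity.ethereum/keys");
		assert_eq!(replace_base_and_local(base, local, "$LOCAL/cache"),
			"/home/user/.cache/io.parity.ethereum/cache");
	}
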
@@ -21,32 +21,21 @@ use self::ansi_term::Style;
 use std::sync::{Arc};
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::time::{Instant, Duration};
 
+use ethcore::client::*;
+use ethcore::header::BlockNumber;
+use ethcore::service::ClientIoMessage;
+use ethcore::snapshot::{RestorationStatus, SnapshotService as SS};
+use ethcore::snapshot::service::Service as SnapshotService;
+use ethsync::{LightSyncProvider, LightSync, SyncProvider, ManageNetwork};
 use io::{TimerToken, IoContext, IoHandler};
 use isatty::{stdout_isatty};
-use ethsync::{SyncProvider, ManageNetwork};
-use util::{RwLock, Mutex, H256, Colour, Bytes};
-use ethcore::client::*;
-use ethcore::service::ClientIoMessage;
-use ethcore::snapshot::service::Service as SnapshotService;
-use ethcore::snapshot::{RestorationStatus, SnapshotService as SS};
+use light::Cache as LightDataCache;
+use light::client::LightChainClient;
 use number_prefix::{binary_prefix, Standalone, Prefixed};
 use parity_rpc::{is_major_importing};
 use parity_rpc::informant::RpcStats;
-pub struct Informant {
-	report: RwLock<Option<ClientReport>>,
-	last_tick: RwLock<Instant>,
-	with_color: bool,
-	client: Arc<Client>,
-	snapshot: Option<Arc<SnapshotService>>,
-	sync: Option<Arc<SyncProvider>>,
-	net: Option<Arc<ManageNetwork>>,
-	rpc_stats: Option<Arc<RpcStats>>,
-	last_import: Mutex<Instant>,
-	skipped: AtomicUsize,
-	skipped_txs: AtomicUsize,
-	in_shutdown: AtomicBool,
-}
+use util::{RwLock, Mutex, H256, Colour, Bytes};
 
 /// Format byte counts to standard denominations.
 pub fn format_bytes(b: usize) -> String {
@@ -68,29 +57,188 @@ impl MillisecondDuration for Duration {
 	}
 }
 
-impl Informant {
+#[derive(Default)]
+struct CacheSizes {
+	sizes: ::std::collections::BTreeMap<&'static str, usize>,
+}
+
+impl CacheSizes {
+	fn insert(&mut self, key: &'static str, bytes: usize) {
+		self.sizes.insert(key, bytes);
+	}
+
+	fn display<F>(&self, style: Style, paint: F) -> String
+		where F: Fn(Style, String) -> String
+	{
+		use std::fmt::Write;
+
+		let mut buf = String::new();
+		for (name, &size) in &self.sizes {
+			write!(buf, " {:>8} {}", paint(style, format_bytes(size)), name)
+				.expect("writing to string won't fail unless OOM; qed")
+		}
+
+		buf
+	}
+}
+
+pub struct SyncInfo {
+	last_imported_block_number: BlockNumber,
+	last_imported_old_block_number: Option<BlockNumber>,
+	num_peers: usize,
+	max_peers: u32,
+}
+
+pub struct Report {
+	importing: bool,
+	chain_info: BlockChainInfo,
+	client_report: ClientReport,
+	queue_info: BlockQueueInfo,
+	cache_sizes: CacheSizes,
+	sync_info: Option<SyncInfo>,
+}
+
+/// Something which can provide data to the informant.
+pub trait InformantData: Send + Sync {
+	/// Whether it executes transactions
+	fn executes_transactions(&self) -> bool;
+
+	/// Whether it is currently importing (also included in `Report`)
+	fn is_major_importing(&self) -> bool;
+
+	/// Generate a report of blockchain status, memory usage, and sync info.
+	fn report(&self) -> Report;
+}
+
+/// Informant data for a full node.
+pub struct FullNodeInformantData {
+	pub client: Arc<Client>,
+	pub sync: Option<Arc<SyncProvider>>,
+	pub net: Option<Arc<ManageNetwork>>,
+}
+
+impl InformantData for FullNodeInformantData {
+	fn executes_transactions(&self) -> bool { true }
+
+	fn is_major_importing(&self) -> bool {
+		let state = self.sync.as_ref().map(|sync| sync.status().state);
+		is_major_importing(state, self.client.queue_info())
+	}
+
+	fn report(&self) -> Report {
+		let (client_report, queue_info, blockchain_cache_info) =
+			(self.client.report(), self.client.queue_info(), self.client.blockchain_cache_info());
+
+		let chain_info = self.client.chain_info();
+
+		let mut cache_sizes = CacheSizes::default();
+		cache_sizes.insert("db", client_report.state_db_mem);
+		cache_sizes.insert("queue", queue_info.mem_used);
+		cache_sizes.insert("chain", blockchain_cache_info.total());
+
+		let (importing, sync_info) = match (self.sync.as_ref(), self.net.as_ref()) {
+			(Some(sync), Some(net)) => {
+				let status = sync.status();
+				let net_config = net.network_config();
+
+				cache_sizes.insert("sync", status.mem_used);
+
+				let importing = is_major_importing(Some(status.state), queue_info.clone());
+				(importing, Some(SyncInfo {
+					last_imported_block_number: status.last_imported_block_number.unwrap_or(chain_info.best_block_number),
+					last_imported_old_block_number: status.last_imported_old_block_number,
+					num_peers: status.num_peers,
+					max_peers: status.current_max_peers(net_config.min_peers, net_config.max_peers),
+				}))
+			}
+			_ => (is_major_importing(None, queue_info.clone()), None),
+		};
+
+		Report {
+			importing,
+			chain_info,
+			client_report,
+			queue_info,
+			cache_sizes,
+			sync_info,
+		}
+	}
+}
+
+/// Informant data for a light node -- note that the network is required.
+pub struct LightNodeInformantData {
+	pub client: Arc<LightChainClient>,
+	pub sync: Arc<LightSync>,
+	pub cache: Arc<Mutex<LightDataCache>>,
+}
+
+impl InformantData for LightNodeInformantData {
+	fn executes_transactions(&self) -> bool { false }
+
+	fn is_major_importing(&self) -> bool {
+		self.sync.is_major_importing()
+	}
+
+	fn report(&self) -> Report {
+		let (client_report, queue_info, chain_info) =
+			(self.client.report(), self.client.queue_info(), self.client.chain_info());
+
+		let mut cache_sizes = CacheSizes::default();
+		cache_sizes.insert("queue", queue_info.mem_used);
+		cache_sizes.insert("cache", self.cache.lock().mem_used());
+
+		let peer_numbers = self.sync.peer_numbers();
+		let sync_info = Some(SyncInfo {
+			last_imported_block_number: chain_info.best_block_number,
+			last_imported_old_block_number: None,
+			num_peers: peer_numbers.connected,
+			max_peers: peer_numbers.max as u32,
+		});

+		Report {
+			importing: self.sync.is_major_importing(),
+			chain_info,
+			client_report,
+			queue_info,
+			cache_sizes,
+			sync_info,
+		}
+	}
+}
+
+pub struct Informant<T> {
+	last_tick: RwLock<Instant>,
+	with_color: bool,
+	target: T,
+	snapshot: Option<Arc<SnapshotService>>,
+	rpc_stats: Option<Arc<RpcStats>>,
+	last_import: Mutex<Instant>,
+	skipped: AtomicUsize,
+	skipped_txs: AtomicUsize,
+	in_shutdown: AtomicBool,
+	last_report: Mutex<ClientReport>,
+}
+
+impl<T: InformantData> Informant<T> {
 	/// Make a new instance potentially `with_color` output.
 	pub fn new(
-		client: Arc<Client>,
-		sync: Option<Arc<SyncProvider>>,
-		net: Option<Arc<ManageNetwork>>,
+		target: T,
 		snapshot: Option<Arc<SnapshotService>>,
 		rpc_stats: Option<Arc<RpcStats>>,
 		with_color: bool,
 	) -> Self {
 		Informant {
-			report: RwLock::new(None),
 			last_tick: RwLock::new(Instant::now()),
 			with_color: with_color,
-			client: client,
+			target: target,
 			snapshot: snapshot,
-			sync: sync,
-			net: net,
 			rpc_stats: rpc_stats,
 			last_import: Mutex::new(Instant::now()),
 			skipped: AtomicUsize::new(0),
 			skipped_txs: AtomicUsize::new(0),
 			in_shutdown: AtomicBool::new(false),
+			last_report: Mutex::new(Default::default()),
 		}
 	}
 
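CacheSizes::display above folds the labelled byte counts into one line with write! on a String. A std-only sketch of the same accumulation, with the ansi_term styling replaced by an identity closure and a stand-in for format_bytes:

	use std::collections::BTreeMap;
	use std::fmt::Write;

	// Stand-in for the informant's format_bytes helper (illustration only).
	fn format_bytes(b: usize) -> String {
		format!("{} B", b)
	}

	fn main() {
		let mut sizes: BTreeMap<&'static str, usize> = BTreeMap::new();
		sizes.insert("db", 12_582_912);
		sizes.insert("queue", 524_288);
		sizes.insert("chain", 4_194_304);

		let paint = |t: String| t; // no colouring in this sketch
		let mut buf = String::new();
		for (name, &size) in &sizes {
			write!(buf, " {:>8} {}", paint(format_bytes(size)), name)
				.expect("writing to a String cannot fail");
		}
		// BTreeMap iterates in key order: chain, db, queue.
		println!("{}", buf);
	}
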
@@ -106,14 +254,24 @@ impl Informant {
 			return;
 		}
 
-		let chain_info = self.client.chain_info();
-		let queue_info = self.client.queue_info();
-		let cache_info = self.client.blockchain_cache_info();
-		let network_config = self.net.as_ref().map(|n| n.network_config());
-		let sync_status = self.sync.as_ref().map(|s| s.status());
+		let Report {
+			importing,
+			chain_info,
+			client_report,
+			queue_info,
+			cache_sizes,
+			sync_info,
+		} = self.target.report();
+
+		let client_report = {
+			let mut last_report = self.last_report.lock();
+			let diffed = client_report.clone() - &*last_report;
+			*last_report = client_report.clone();
+			diffed
+		};
 
 		let rpc_stats = self.rpc_stats.as_ref();
 
-		let importing = is_major_importing(sync_status.map(|s| s.state), self.client.queue_info());
 		let (snapshot_sync, snapshot_current, snapshot_total) = self.snapshot.as_ref().map_or((false, 0, 0), |s|
 			match s.status() {
 				RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } =>
@@ -128,9 +286,6 @@ impl Informant {
 
 		*self.last_tick.write() = Instant::now();
 
-		let mut write_report = self.report.write();
-		let report = self.client.report();
-
 		let paint = |c: Style, t: String| match self.with_color && stdout_isatty() {
 			true => format!("{}", c.paint(t)),
 			false => t,
@@ -142,13 +297,16 @@ impl Informant {
 			false => format!("Syncing {} {} {} {}+{} Qed",
 				paint(White.bold(), format!("{:>8}", format!("#{}", chain_info.best_block_number))),
 				paint(White.bold(), format!("{}", chain_info.best_block_hash)),
-				{
-					let last_report = match *write_report { Some(ref last_report) => last_report.clone(), _ => ClientReport::default() };
+				if self.target.executes_transactions() {
 					format!("{} blk/s {} tx/s {} Mgas/s",
-						paint(Yellow.bold(), format!("{:4}", ((report.blocks_imported - last_report.blocks_imported) * 1000) as u64 / elapsed.as_milliseconds())),
-						paint(Yellow.bold(), format!("{:4}", ((report.transactions_applied - last_report.transactions_applied) * 1000) as u64 / elapsed.as_milliseconds())),
-						paint(Yellow.bold(), format!("{:3}", ((report.gas_processed - last_report.gas_processed) / From::from(elapsed.as_milliseconds() * 1000)).low_u64()))
+						paint(Yellow.bold(), format!("{:4}", (client_report.blocks_imported * 1000) as u64 / elapsed.as_milliseconds())),
+						paint(Yellow.bold(), format!("{:4}", (client_report.transactions_applied * 1000) as u64 / elapsed.as_milliseconds())),
+						paint(Yellow.bold(), format!("{:3}", (client_report.gas_processed / From::from(elapsed.as_milliseconds() * 1000)).low_u64()))
 					)
+				} else {
+					format!("{} hdr/s",
+						paint(Yellow.bold(), format!("{:4}", (client_report.blocks_imported * 1000) as u64 / elapsed.as_milliseconds()))
+					)
 				},
 				paint(Green.bold(), format!("{:5}", queue_info.unverified_queue_size)),
 				paint(Green.bold(), format!("{:5}", queue_info.verified_queue_size))
@@ -157,29 +315,21 @@ impl Informant {
 				},
 				false => String::new(),
 			},
-			match (&sync_status, &network_config) {
-				(&Some(ref sync_info), &Some(ref net_config)) => format!("{}{}/{} peers",
+			match sync_info.as_ref() {
+				Some(ref sync_info) => format!("{}{}/{} peers",
 					match importing {
-						true => format!("{} ", paint(Green.bold(), format!("{:>8}", format!("#{}", sync_info.last_imported_block_number.unwrap_or(chain_info.best_block_number))))),
+						true => format!("{} ", paint(Green.bold(), format!("{:>8}", format!("#{}", sync_info.last_imported_block_number)))),
 						false => match sync_info.last_imported_old_block_number {
 							Some(number) => format!("{} ", paint(Yellow.bold(), format!("{:>8}", format!("#{}", number)))),
 							None => String::new(),
 						}
 					},
 					paint(Cyan.bold(), format!("{:2}", sync_info.num_peers)),
-					paint(Cyan.bold(), format!("{:2}", sync_info.current_max_peers(net_config.min_peers, net_config.max_peers))),
+					paint(Cyan.bold(), format!("{:2}", sync_info.max_peers)),
 				),
 				_ => String::new(),
 			},
-			format!("{} db {} chain {} queue{}",
-				paint(Blue.bold(), format!("{:>8}", format_bytes(report.state_db_mem))),
-				paint(Blue.bold(), format!("{:>8}", format_bytes(cache_info.total()))),
-				paint(Blue.bold(), format!("{:>8}", format_bytes(queue_info.mem_used))),
-				match sync_status {
-					Some(ref sync_info) => format!(" {} sync", paint(Blue.bold(), format!("{:>8}", format_bytes(sync_info.mem_used)))),
-					_ => String::new(),
-				}
-			),
+			cache_sizes.display(Blue.bold(), &paint),
 			match rpc_stats {
 				Some(ref rpc_stats) => format!(
 					"RPC: {} conn, {} req/s, {} µs",
@@ -190,25 +340,24 @@ impl Informant {
 				_ => String::new(),
 			},
 		);
 
-		*write_report = Some(report);
 	}
 }
 
-impl ChainNotify for Informant {
+impl ChainNotify for Informant<FullNodeInformantData> {
 	fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, duration: u64) {
 		let mut last_import = self.last_import.lock();
-		let sync_state = self.sync.as_ref().map(|s| s.status().state);
-		let importing = is_major_importing(sync_state, self.client.queue_info());
+		let client = &self.target.client;
+		let importing = self.target.is_major_importing();
 		let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing;
 		let txs_imported = imported.iter()
 			.take(imported.len().saturating_sub(if ripe { 1 } else { 0 }))
-			.filter_map(|h| self.client.block(BlockId::Hash(*h)))
+			.filter_map(|h| client.block(BlockId::Hash(*h)))
 			.map(|b| b.transactions_count())
 			.sum();
 
 		if ripe {
-			if let Some(block) = imported.last().and_then(|h| self.client.block(BlockId::Hash(*h))) {
+			if let Some(block) = imported.last().and_then(|h| client.block(BlockId::Hash(*h))) {
 				let header_view = block.header_view();
 				let size = block.rlp().as_raw().len();
 				let (skipped, skipped_txs) = (self.skipped.load(AtomicOrdering::Relaxed) + imported.len() - 1, self.skipped_txs.load(AtomicOrdering::Relaxed) + txs_imported);
@@ -241,7 +390,7 @@ impl ChainNotify for Informant {
 
 const INFO_TIMER: TimerToken = 0;
 
-impl IoHandler<ClientIoMessage> for Informant {
+impl<T: InformantData> IoHandler<ClientIoMessage> for Informant<T> {
 	fn initialize(&self, io: &IoContext<ClientIoMessage>) {
 		io.register_timer(INFO_TIMER, 5000).expect("Error registering timer");
 	}
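The tick handler now keeps the previous cumulative ClientReport in last_report and subtracts it from the fresh one, so the blk/s, tx/s and Mgas/s figures come from a plain difference rather than the old Option<ClientReport> cache. A toy sketch of that pattern (ToyReport is a stand-in; the real ClientReport supplies the subtraction used above):

	use std::ops::Sub;
	use std::sync::Mutex;

	#[derive(Clone, Default)]
	struct ToyReport {
		blocks_imported: usize,
		transactions_applied: usize,
	}

	impl<'a> Sub<&'a ToyReport> for ToyReport {
		type Output = ToyReport;
		fn sub(self, rhs: &'a ToyReport) -> ToyReport {
			ToyReport {
				blocks_imported: self.blocks_imported - rhs.blocks_imported,
				transactions_applied: self.transactions_applied - rhs.transactions_applied,
			}
		}
	}

	fn main() {
		let last_report = Mutex::new(ToyReport::default());

		// One tick: diff the cumulative report against the previous one,
		// then remember the cumulative value for the next tick.
		let cumulative = ToyReport { blocks_imported: 120, transactions_applied: 4_500 };
		let diffed = {
			let mut last = last_report.lock().unwrap();
			let d = cumulative.clone() - &*last;
			*last = cumulative.clone();
			d
		};
		println!("{} blk, {} tx since last tick", diffed.blocks_imported, diffed.transactions_applied);
	}
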
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::{str, fs, fmt};
+use std::{str, fs, fmt, path};
 use std::time::Duration;
 use util::{Address, U256, version_data};
 use util::journaldb::Algorithm;
@@ -79,19 +79,20 @@ impl fmt::Display for SpecType {
 }
 
 impl SpecType {
-	pub fn spec(&self) -> Result<Spec, String> {
+	pub fn spec<T: AsRef<path::Path>>(&self, cache_dir: T) -> Result<Spec, String> {
+		let cache_dir = cache_dir.as_ref();
 		match *self {
-			SpecType::Foundation => Ok(ethereum::new_foundation()),
-			SpecType::Morden => Ok(ethereum::new_morden()),
-			SpecType::Ropsten => Ok(ethereum::new_ropsten()),
-			SpecType::Olympic => Ok(ethereum::new_olympic()),
-			SpecType::Classic => Ok(ethereum::new_classic()),
-			SpecType::Expanse => Ok(ethereum::new_expanse()),
-			SpecType::Kovan => Ok(ethereum::new_kovan()),
+			SpecType::Foundation => Ok(ethereum::new_foundation(cache_dir)),
+			SpecType::Morden => Ok(ethereum::new_morden(cache_dir)),
+			SpecType::Ropsten => Ok(ethereum::new_ropsten(cache_dir)),
+			SpecType::Olympic => Ok(ethereum::new_olympic(cache_dir)),
+			SpecType::Classic => Ok(ethereum::new_classic(cache_dir)),
+			SpecType::Expanse => Ok(ethereum::new_expanse(cache_dir)),
+			SpecType::Kovan => Ok(ethereum::new_kovan(cache_dir)),
 			SpecType::Dev => Ok(Spec::new_instant()),
 			SpecType::Custom(ref filename) => {
-				let file = fs::File::open(filename).map_err(|_| "Could not load specification file.")?;
-				Spec::load(file)
+				let file = fs::File::open(filename).map_err(|e| format!("Could not load specification file at {}: {}", filename, e))?;
+				Spec::load(cache_dir, file)
 			}
 		}
 	}
 }
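SpecType::spec now takes the cache directory as any AsRef<Path> value, which is why later hunks can simply pass &cmd.dirs.cache (a &String). A small illustration of that bound (the helper below is hypothetical):

	use std::path::{Path, PathBuf};

	// Hypothetical helper mirroring the new bound: &String, &str and &Path all work.
	fn takes_cache_dir<T: AsRef<Path>>(cache_dir: T) -> PathBuf {
		cache_dir.as_ref().to_path_buf()
	}

	fn main() {
		let from_config: String = "/home/user/.cache/io.parity.ethereum/cache".into();
		println!("{}", takes_cache_dir(&from_config).display());
		println!("{}", takes_cache_dir(Path::new("/tmp/parity-cache")).display());
	}
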
@@ -16,26 +16,27 @@
 
 use std::sync::Arc;
 use std::net::{TcpListener};
 
 use ctrlc::CtrlC;
-use fdlimit::raise_fd_limit;
-use parity_rpc::{NetworkSettings, informant, is_major_importing};
-use ethsync::NetworkConfiguration;
-use util::{Colour, version, Mutex, Condvar};
 use ethcore_logger::{Config as LogConfig, RotatingLogger};
-use ethcore::miner::{StratumOptions, Stratum};
-use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient};
-use ethcore::service::ClientService;
 use ethcore::account_provider::{AccountProvider, AccountProviderSettings};
+use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient};
+use ethcore::ethstore::ethkey;
 use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions};
+use ethcore::miner::{StratumOptions, Stratum};
+use ethcore::service::ClientService;
 use ethcore::snapshot;
 use ethcore::verification::queue::VerifierSettings;
-use ethcore::ethstore::ethkey;
-use light::Cache as LightDataCache;
+use ethsync::NetworkConfiguration;
 use ethsync::SyncConfig;
-use informant::Informant;
-use updater::{UpdatePolicy, Updater};
-use parity_reactor::EventLoop;
+use fdlimit::raise_fd_limit;
 use hash_fetch::fetch::{Fetch, Client as FetchClient};
+use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
+use light::Cache as LightDataCache;
+use parity_reactor::EventLoop;
+use parity_rpc::{NetworkSettings, informant, is_major_importing};
+use updater::{UpdatePolicy, Updater};
+use util::{Colour, version, Mutex, Condvar};
 
 use params::{
 	SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
@@ -168,7 +169,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
 	use util::RwLock;
 
 	// load spec
-	let spec = cmd.spec.spec()?;
+	let spec = cmd.spec.spec(&cmd.dirs.cache)?;
 
 	// load genesis hash
 	let genesis_hash = spec.genesis_header().hash();
@@ -209,6 +210,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
 		db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
 		db_compaction: compaction,
 		db_wal: cmd.wal,
+		verify_full: true,
 	};
 
 	config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
@@ -300,7 +302,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
 		logger: logger,
 		settings: Arc::new(cmd.net_settings),
 		on_demand: on_demand,
-		cache: cache,
+		cache: cache.clone(),
 		transaction_queue: txq,
 		dapps_service: dapps_service,
 		dapps_address: cmd.dapps_conf.address(cmd.http_conf.address()),
@@ -322,16 +324,25 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
 	let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;
 	let _ui_server = rpc::new_http("Parity Wallet (UI)", "ui", cmd.ui_conf.clone().into(), &dependencies, ui_middleware)?;
 
-	// minimal informant thread. Just prints block number every 5 seconds.
-	// TODO: integrate with informant.rs
-	let informant_client = service.client().clone();
-	::std::thread::spawn(move || loop {
-		info!("#{}", informant_client.best_block_header().number());
-		::std::thread::sleep(::std::time::Duration::from_secs(5));
-	});
+	// the informant
+	let informant = Arc::new(Informant::new(
+		LightNodeInformantData {
+			client: service.client().clone(),
+			sync: light_sync.clone(),
+			cache: cache,
+		},
+		None,
+		Some(rpc_stats),
+		cmd.logger_config.color,
+	));
 
-	// wait for ctrl-c.
-	Ok(wait_for_exit(None, None, can_restart))
+	service.register_handler(informant.clone()).map_err(|_| "Unable to register informant handler".to_owned())?;
+
+	// wait for ctrl-c and then shut down the informant.
+	let res = wait_for_exit(None, None, can_restart);
+	informant.shutdown();
+
+	Ok(res)
 }
 
 pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> Result<(bool, Option<String>), String> {
@@ -352,7 +363,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
 	}
 
 	// load spec
-	let spec = cmd.spec.spec()?;
+	let spec = cmd.spec.spec(&cmd.dirs.cache)?;
 
 	// load genesis hash
 	let genesis_hash = spec.genesis_header().hash();
@@ -672,9 +683,11 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
 
 	// the informant
 	let informant = Arc::new(Informant::new(
-		service.client(),
-		Some(sync_provider.clone()),
-		Some(manage_network.clone()),
+		FullNodeInformantData {
+			client: service.client(),
+			sync: Some(sync_provider.clone()),
+			net: Some(manage_network.clone()),
+		},
 		Some(snapshot_service.clone()),
 		Some(rpc_stats.clone()),
 		cmd.logger_config.color,
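With these changes the light client reuses the regular informant: it is registered as an IoHandler on the client service and, after wait_for_exit returns, informant.shutdown() is called. Assuming shutdown() simply sets the in_shutdown flag declared on the struct (so later timer ticks become no-ops), the pattern looks roughly like this std-only sketch:

	use std::sync::Arc;
	use std::sync::atomic::{AtomicBool, Ordering};
	use std::thread;
	use std::time::Duration;

	// Toy informant: tick() is what a periodic timer would call.
	struct ToyInformant {
		in_shutdown: AtomicBool,
	}

	impl ToyInformant {
		fn tick(&self) {
			if self.in_shutdown.load(Ordering::SeqCst) {
				return; // shutting down: stay quiet
			}
			println!("still running");
		}

		fn shutdown(&self) {
			self.in_shutdown.store(true, Ordering::SeqCst);
		}
	}

	fn main() {
		let informant = Arc::new(ToyInformant { in_shutdown: AtomicBool::new(false) });

		let ticker = {
			let informant = informant.clone();
			thread::spawn(move || for _ in 0..3 {
				informant.tick();
				thread::sleep(Duration::from_millis(100));
			})
		};

		thread::sleep(Duration::from_millis(150));
		informant.shutdown(); // later ticks become no-ops
		ticker.join().unwrap();
	}
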
@@ -133,7 +133,7 @@ impl SnapshotCommand {
 	// shared portion of snapshot commands: start the client service
 	fn start_service(self) -> Result<ClientService, String> {
 		// load spec file
-		let spec = self.spec.spec()?;
+		let spec = self.spec.spec(&self.dirs.cache)?;
 
 		// load genesis hash
 		let genesis_hash = spec.genesis_header().hash();
@@ -17,6 +17,7 @@
 //! Generic poll manager for Pub-Sub.
 
 use std::sync::Arc;
+use std::sync::atomic::{self, AtomicBool};
 use util::Mutex;
 
 use jsonrpc_core::futures::future::{self, Either};
@@ -34,7 +35,8 @@ struct Subscription {
 	method: String,
 	params: core::Params,
 	sink: mpsc::Sender<Result<core::Value, core::Error>>,
-	last_result: Arc<Mutex<Option<core::Output>>>,
+	/// a flag if subscription is still active and last returned value
+	last_result: Arc<(AtomicBool, Mutex<Option<core::Output>>)>,
 }
 
 /// A struct managing all subscriptions.
@@ -68,10 +70,10 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
 	{
 		let (sink, stream) = mpsc::channel(1);
 		let subscription = Subscription {
-			metadata: metadata,
-			method: method,
-			params: params,
-			sink: sink,
+			metadata,
+			method,
+			params,
+			sink,
 			last_result: Default::default(),
 		};
 		let id = self.subscribers.insert(subscription);
@@ -80,7 +82,9 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
 
 	pub fn unsubscribe(&mut self, id: &SubscriptionId) -> bool {
 		debug!(target: "pubsub", "Removing subscription: {:?}", id);
-		self.subscribers.remove(id).is_some()
+		self.subscribers.remove(id).map(|subscription| {
+			subscription.last_result.0.store(true, atomic::Ordering::SeqCst);
+		}).is_some()
 	}
 
 	pub fn tick(&self) -> BoxFuture<(), ()> {
@@ -100,7 +104,12 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
 			let sender = subscription.sink.clone();
 
 			let result = result.and_then(move |response| {
-				let mut last_result = last_result.lock();
+				// quick check if the subscription is still valid
+				if last_result.0.load(atomic::Ordering::SeqCst) {
+					return Either::B(future::ok(()))
+				}
+
+				let mut last_result = last_result.1.lock();
 				if *last_result != response && response.is_some() {
 					let output = response.expect("Existence proved by the condition.");
 					debug!(target: "pubsub", "Got new response, sending: {:?}", output);
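last_result is now a pair of an AtomicBool and the mutex-guarded previous output: unsubscribe flips the flag, and a poll that completes afterwards checks it and drops its response instead of delivering it. A std-only sketch of that check-then-update pattern (the Either::B early return above is replaced by a plain return here):

	use std::sync::{Arc, Mutex};
	use std::sync::atomic::{AtomicBool, Ordering};

	type Shared = Arc<(AtomicBool, Mutex<Option<String>>)>;

	fn unsubscribe(slot: &Shared) {
		// mark the subscription as cancelled
		slot.0.store(true, Ordering::SeqCst);
	}

	fn deliver(slot: &Shared, response: Option<String>) {
		// quick check if the subscription is still valid
		if slot.0.load(Ordering::SeqCst) {
			return;
		}
		let mut last_result = slot.1.lock().unwrap();
		if *last_result != response && response.is_some() {
			println!("sending: {:?}", response);
			*last_result = response;
		}
	}

	fn main() {
		let slot: Shared = Arc::new((AtomicBool::new(false), Mutex::new(None)));
		deliver(&slot, Some("0x01".to_string())); // delivered
		unsubscribe(&slot);
		deliver(&slot, Some("0x02".to_string())); // dropped: subscription cancelled
	}
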
@@ -15,6 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 //! rpc integration tests.
+use std::env;
 use std::sync::Arc;
 use std::time::Duration;
 
@@ -57,6 +58,7 @@ fn miner_service(spec: &Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
 		force_sealing: true,
 		reseal_on_external_tx: true,
 		reseal_on_own_tx: true,
+		reseal_on_uncle: false,
 		tx_queue_size: 1024,
 		tx_gas_limit: !U256::zero(),
 		tx_queue_strategy: PrioritizationStrategy::GasPriceOnly,
@@ -318,7 +320,7 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{
 #[test]
 fn eth_transaction_count() {
 	let secret = "8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2".parse().unwrap();
-	let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC).expect("invalid chain spec"));
+	let tester = EthTester::from_spec(Spec::load(&env::temp_dir(), TRANSACTION_COUNT_SPEC).expect("invalid chain spec"));
 	let address = tester.accounts.insert_account(secret, "").unwrap();
 	tester.accounts.unlock_account_permanently(address, "".into()).unwrap();
 
@@ -444,7 +446,7 @@ fn verify_transaction_counts(name: String, chain: BlockChain) {
 
 #[test]
 fn starting_nonce_test() {
-	let tester = EthTester::from_spec(Spec::load(POSITIVE_NONCE_SPEC).expect("invalid chain spec"));
+	let tester = EthTester::from_spec(Spec::load(&env::temp_dir(), POSITIVE_NONCE_SPEC).expect("invalid chain spec"));
 	let address = Address::from(10);
 
 	let sample = tester.handler.handle_request_sync(&(r#"
@@ -211,8 +211,12 @@ impl TestNet<Peer> {
 	pub fn light(n_light: usize, n_full: usize) -> Self {
 		let mut peers = Vec::with_capacity(n_light + n_full);
 		for _ in 0..n_light {
+			let mut config = ::light::client::Config::default();
+
+			// skip full verification because the blocks are bad.
+			config.verify_full = false;
 			let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-			let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected(), cache);
+			let client = LightClient::in_memory(config, &Spec::new_test(), IoChannel::disconnected(), cache);
 			peers.push(Arc::new(Peer::new_light(Arc::new(client))))
 		}
 