Integration with zgp whitelist contract (#4215)
* zgp-transactions checker
* polishing
* rename + refactor
* refuse-service-transactions CLI option
* fixed tests compilation
This commit is contained in:
parent
220084d77d
commit
092e24b9f2
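In short: after this change, zero-gas-price ("zgp"/service) transactions are only admitted to the transaction queue when their sender is certified by the whitelist contract registered in the on-chain registry as "service_transaction_checker", and the new --refuse-service-transactions option rejects them outright. A minimal sketch of that check, using the types introduced in this diff (the helper function itself is illustrative and not part of the change; the crate-relative import paths below are assumed from the diff):

use client::MiningBlockChainClient;
use miner::service_transaction_checker::ServiceTransactionChecker;
use transaction::SignedTransaction;

// Illustrative only: mirrors what Miner does via ServiceTransactionAction and
// TransactionDetailsProvider further down in this diff.
fn service_tx_acceptable(client: &MiningBlockChainClient, tx: &SignedTransaction) -> Result<bool, String> {
	// Intended for zero-gas-price transactions (the checker debug-asserts gas_price == 0).
	let checker = ServiceTransactionChecker::default();
	// Lazily resolve the whitelist contract address from the registry; a no-op once configured.
	checker.update_from_chain_client(client);
	// Calls certified(sender) on the contract; Err if no contract is registered or the call
	// fails, Ok(false) if the sender is not whitelisted.
	checker.check(client, tx)
}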
@@ -308,22 +308,29 @@ impl TestBlockChainClient {
}
}

/// Inserts a transaction to miners transactions queue.
pub fn insert_transaction_to_queue(&self) {
/// Inserts a transaction with given gas price to miners transactions queue.
pub fn insert_transaction_with_gas_price_to_queue(&self, gas_price: U256) -> H256 {
let keypair = Random.generate().unwrap();
let tx = Transaction {
action: Action::Create,
value: U256::from(100),
data: "3331600055".from_hex().unwrap(),
gas: U256::from(100_000),
gas_price: U256::from(20_000_000_000u64),
gas_price: gas_price,
nonce: U256::zero()
};
let signed_tx = tx.sign(keypair.secret(), None);
self.set_balance(signed_tx.sender(), U256::from(10_000_000_000_000_000_000u64));
self.set_balance(signed_tx.sender(), 10_000_000_000_000_000_000u64.into());
let hash = signed_tx.hash();
let res = self.miner.import_external_transactions(self, vec![signed_tx.into()]);
let res = res.into_iter().next().unwrap().expect("Successful import");
assert_eq!(res, TransactionImportResult::Current);
hash
}

/// Inserts a transaction to miners transactions queue.
pub fn insert_transaction_to_queue(&self) -> H256 {
self.insert_transaction_with_gas_price_to_queue(U256::from(20_000_000_000u64))
}

/// Set reported history size.
@@ -22,7 +22,7 @@ use std::ops::{Deref, DerefMut};
use std::cell::Cell;
use transaction::{SignedTransaction, Action};
use transient_hashmap::TransientHashMap;
use miner::{TransactionQueue, TransactionImportResult, TransactionOrigin, AccountDetails};
use miner::{TransactionQueue, TransactionQueueDetailsProvider, TransactionImportResult, TransactionOrigin};
use miner::transaction_queue::QueuingInstant;
use error::{Error, TransactionError};
use util::{Uint, U256, H256, Address, Hashable};
@@ -76,16 +76,12 @@ impl BanningTransactionQueue {

/// Add to the queue taking bans into consideration.
/// May reject transaction because of the banlist.
pub fn add_with_banlist<F, G>(
pub fn add_with_banlist(
&mut self,
transaction: SignedTransaction,
time: QueuingInstant,
account_details: &F,
gas_estimator: &G,
) -> Result<TransactionImportResult, Error> where
F: Fn(&Address) -> AccountDetails,
G: Fn(&SignedTransaction) -> U256,
{
details_provider: &TransactionQueueDetailsProvider,
) -> Result<TransactionImportResult, Error> {
if let Threshold::BanAfter(threshold) = self.ban_threshold {
// NOTE In all checks use direct query to avoid increasing ban timeout.

@@ -116,7 +112,7 @@ impl BanningTransactionQueue {
}
}
}
self.queue.add(transaction, TransactionOrigin::External, time, None, account_details, gas_estimator)
self.queue.add(transaction, TransactionOrigin::External, time, None, details_provider)
}

/// Ban transaction with given hash.
@@ -219,22 +215,16 @@ mod tests {
use transaction::{Transaction, SignedTransaction, Action};
use error::{Error, TransactionError};
use client::TransactionImportResult;
use miner::{TransactionQueue, TransactionOrigin, AccountDetails};
use miner::{TransactionQueue, TransactionOrigin};
use util::{Uint, U256, Address, FromHex, Hashable};
use miner::transaction_queue::test::DummyTransactionDetailsProvider;

fn queue() -> BanningTransactionQueue {
BanningTransactionQueue::new(TransactionQueue::default(), Threshold::BanAfter(1), Duration::from_secs(180))
}

fn default_account_details(_address: &Address) -> AccountDetails {
AccountDetails {
nonce: U256::zero(),
balance: !U256::zero(),
}
}

fn gas_required(_tx: &SignedTransaction) -> U256 {
0.into()
fn default_tx_provider() -> DummyTransactionDetailsProvider {
DummyTransactionDetailsProvider::default().with_account_nonce(U256::zero())
}

fn transaction(action: Action) -> SignedTransaction {
@@ -264,7 +254,7 @@ mod tests {
let mut txq = queue();

// when
txq.queue().add(tx, TransactionOrigin::External, 0, None, &default_account_details, &gas_required).unwrap();
txq.queue().add(tx, TransactionOrigin::External, 0, None, &default_tx_provider()).unwrap();

// then
// should also deref to queue
@@ -280,12 +270,12 @@ mod tests {
let banlist1 = txq.ban_sender(tx.sender());
assert!(!banlist1, "Threshold not reached yet.");
// Insert once
let import1 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required).unwrap();
let import1 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider()).unwrap();
assert_eq!(import1, TransactionImportResult::Current);

// when
let banlist2 = txq.ban_sender(tx.sender());
let import2 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required);
let import2 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider());

// then
assert!(banlist2, "Threshold should be reached - banned.");
@@ -304,12 +294,12 @@ mod tests {
let banlist1 = txq.ban_recipient(recipient);
assert!(!banlist1, "Threshold not reached yet.");
// Insert once
let import1 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required).unwrap();
let import1 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider()).unwrap();
assert_eq!(import1, TransactionImportResult::Current);

// when
let banlist2 = txq.ban_recipient(recipient);
let import2 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required);
let import2 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider());

// then
assert!(banlist2, "Threshold should be reached - banned.");
@@ -326,12 +316,12 @@ mod tests {
let banlist1 = txq.ban_codehash(codehash);
assert!(!banlist1, "Threshold not reached yet.");
// Insert once
let import1 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required).unwrap();
let import1 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider()).unwrap();
assert_eq!(import1, TransactionImportResult::Current);

// when
let banlist2 = txq.ban_codehash(codehash);
let import2 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required);
let import2 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider());

// then
assert!(banlist2, "Threshold should be reached - banned.");
@@ -29,11 +29,13 @@ use transaction::{Action, UnverifiedTransaction, PendingTransaction, SignedTrans
use receipt::{Receipt, RichReceipt};
use spec::Spec;
use engines::{Engine, Seal};
use miner::{MinerService, MinerStatus, TransactionQueue, PrioritizationStrategy, AccountDetails, TransactionOrigin};
use miner::{MinerService, MinerStatus, TransactionQueue, TransactionQueueDetailsProvider, PrioritizationStrategy,
AccountDetails, TransactionOrigin};
use miner::banning_queue::{BanningTransactionQueue, Threshold};
use miner::work_notify::WorkPoster;
use miner::price_info::PriceInfo;
use miner::local_transactions::{Status as LocalTransactionStatus};
use miner::service_transaction_checker::ServiceTransactionChecker;
use header::BlockNumber;

/// Different possible definitions for pending transaction set.
@@ -102,8 +104,10 @@ pub struct MinerOptions {
pub enable_resubmission: bool,
/// Global gas limit for all transaction in the queue except for local and retracted.
pub tx_queue_gas_limit: GasLimit,
/// Banning settings
/// Banning settings.
pub tx_queue_banning: Banning,
/// Do we refuse to accept service transactions even if sender is certified.
pub refuse_service_transactions: bool,
}

impl Default for MinerOptions {
@@ -122,6 +126,7 @@ impl Default for MinerOptions {
work_queue_size: 20,
enable_resubmission: true,
tx_queue_banning: Banning::Disabled,
refuse_service_transactions: false,
}
}
}
@@ -221,6 +226,7 @@ pub struct Miner {
accounts: Option<Arc<AccountProvider>>,
work_poster: Option<WorkPoster>,
gas_pricer: Mutex<GasPricer>,
service_transaction_action: ServiceTransactionAction,
}

impl Miner {
@@ -244,6 +250,10 @@ impl Miner {
ban_duration,
),
};
let service_transaction_action = match options.refuse_service_transactions {
true => ServiceTransactionAction::Refuse,
false => ServiceTransactionAction::Check(ServiceTransactionChecker::default()),
};
Miner {
transaction_queue: Arc::new(Mutex::new(txq)),
next_allowed_reseal: Mutex::new(Instant::now()),
@@ -263,6 +273,7 @@ impl Miner {
engine: spec.engine.clone(),
work_poster: work_poster,
gas_pricer: Mutex::new(gas_pricer),
service_transaction_action: service_transaction_action,
}
}

@@ -526,8 +537,8 @@ impl Miner {
}
}

fn update_gas_limit(&self, chain: &MiningBlockChainClient) {
let gas_limit = chain.best_block_header().gas_limit();
fn update_gas_limit(&self, client: &MiningBlockChainClient) {
let gas_limit = client.best_block_header().gas_limit();
let mut queue = self.transaction_queue.lock();
queue.set_gas_limit(gas_limit);
if let GasLimit::Auto = self.options.tx_queue_gas_limit {
@@ -537,7 +548,7 @@ impl Miner {
}

/// Returns true if we had to prepare new pending block.
fn prepare_work_sealing(&self, chain: &MiningBlockChainClient) -> bool {
fn prepare_work_sealing(&self, client: &MiningBlockChainClient) -> bool {
trace!(target: "miner", "prepare_work_sealing: entering");
let prepare_new = {
let mut sealing_work = self.sealing_work.lock();
@@ -555,11 +566,11 @@ impl Miner {
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
let (block, original_work_hash) = self.prepare_block(chain);
let (block, original_work_hash) = self.prepare_block(client);
self.prepare_work(block, original_work_hash);
}
let mut sealing_block_last_request = self.sealing_block_last_request.lock();
let best_number = chain.chain_info().best_block_number;
let best_number = client.chain_info().best_block_number;
if *sealing_block_last_request != best_number {
trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
*sealing_block_last_request = best_number;
@@ -571,31 +582,23 @@ impl Miner {

fn add_transactions_to_queue(
&self,
chain: &MiningBlockChainClient,
client: &MiningBlockChainClient,
transactions: Vec<UnverifiedTransaction>,
default_origin: TransactionOrigin,
min_block: Option<BlockNumber>,
transaction_queue: &mut BanningTransactionQueue,
) -> Vec<Result<TransactionImportResult, Error>> {

let fetch_account = |a: &Address| AccountDetails {
nonce: chain.latest_nonce(a),
balance: chain.latest_balance(a),
};

let accounts = self.accounts.as_ref()
.and_then(|provider| provider.accounts().ok())
.map(|accounts| accounts.into_iter().collect::<HashSet<_>>());

let schedule = chain.latest_schedule();
let gas_required = |tx: &SignedTransaction| tx.gas_required(&schedule).into();
let best_block_header = chain.best_block_header().decode();
let insertion_time = chain.chain_info().best_block_number;
let best_block_header = client.best_block_header().decode();
let insertion_time = client.chain_info().best_block_number;

transactions.into_iter()
.map(|tx| {
let hash = tx.hash();
if chain.transaction_block(TransactionId::Hash(hash)).is_some() {
if client.transaction_block(TransactionId::Hash(hash)).is_some() {
debug!(target: "miner", "Rejected tx {:?}: already in the blockchain", hash);
return Err(Error::Transaction(TransactionError::AlreadyImported));
}
@@ -614,13 +617,17 @@ impl Miner {
}
}).unwrap_or(default_origin);

// try to install service transaction checker before appending transactions
self.service_transaction_action.update_from_chain_client(client);

let details_provider = TransactionDetailsProvider::new(client, &self.service_transaction_action);
match origin {
TransactionOrigin::Local | TransactionOrigin::RetractedBlock => {
transaction_queue.add(transaction, origin, insertion_time, min_block, &fetch_account, &gas_required)
transaction_queue.add(transaction, origin, insertion_time, min_block, &details_provider)
},
TransactionOrigin::External => {
transaction_queue.add_with_banlist(transaction, insertion_time, &fetch_account, &gas_required)
}
transaction_queue.add_with_banlist(transaction, insertion_time, &details_provider)
},
}
},
}
@@ -1158,6 +1165,60 @@ impl MinerService for Miner {
}
}

/// Action when service transaction is received
enum ServiceTransactionAction {
/// Refuse service transaction immediately
Refuse,
/// Accept if sender is certified to send service transactions
Check(ServiceTransactionChecker),
}

impl ServiceTransactionAction {
pub fn update_from_chain_client(&self, client: &MiningBlockChainClient) {
if let ServiceTransactionAction::Check(ref checker) = *self {
checker.update_from_chain_client(client);
}
}

pub fn check(&self, client: &MiningBlockChainClient, tx: &SignedTransaction) -> Result<bool, String> {
match *self {
ServiceTransactionAction::Refuse => Err("configured to refuse service transactions".to_owned()),
ServiceTransactionAction::Check(ref checker) => checker.check(client, tx),
}
}
}

struct TransactionDetailsProvider<'a> {
client: &'a MiningBlockChainClient,
service_transaction_action: &'a ServiceTransactionAction,
}

impl<'a> TransactionDetailsProvider<'a> {
pub fn new(client: &'a MiningBlockChainClient, service_transaction_action: &'a ServiceTransactionAction) -> Self {
TransactionDetailsProvider {
client: client,
service_transaction_action: service_transaction_action,
}
}
}

impl<'a> TransactionQueueDetailsProvider for TransactionDetailsProvider<'a> {
fn fetch_account(&self, address: &Address) -> AccountDetails {
AccountDetails {
nonce: self.client.latest_nonce(address),
balance: self.client.latest_balance(address),
}
}

fn estimate_gas_required(&self, tx: &SignedTransaction) -> U256 {
tx.gas_required(&self.client.latest_schedule()).into()
}

fn is_service_transaction_acceptable(&self, tx: &SignedTransaction) -> Result<bool, String> {
self.service_transaction_action.check(self.client, tx)
}
}

#[cfg(test)]
mod tests {

@@ -1222,6 +1283,7 @@ mod tests {
work_queue_size: 5,
enable_resubmission: true,
tx_queue_banning: Banning::Disabled,
refuse_service_transactions: false,
},
GasPricer::new_fixed(0u64.into()),
&Spec::new_test(),
@@ -46,12 +46,14 @@ mod external;
mod local_transactions;
mod miner;
mod price_info;
mod service_transaction_checker;
mod transaction_queue;
mod work_notify;

pub use self::external::{ExternalMiner, ExternalMinerService};
pub use self::miner::{Miner, MinerOptions, Banning, PendingSet, GasPricer, GasPriceCalibratorOptions, GasLimit};
pub use self::transaction_queue::{TransactionQueue, PrioritizationStrategy, AccountDetails, TransactionOrigin};
pub use self::transaction_queue::{TransactionQueue, TransactionDetailsProvider as TransactionQueueDetailsProvider,
PrioritizationStrategy, AccountDetails, TransactionOrigin};
pub use self::local_transactions::{Status as LocalTransactionStatus};
pub use client::TransactionImportResult;
ethcore/src/miner/service_transaction_checker.rs (new file, 212 lines)
@@ -0,0 +1,212 @@
// Copyright 2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use client::MiningBlockChainClient;
use transaction::SignedTransaction;
use util::{U256, Uint, Mutex};

const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker";

/// Service transactions checker.
#[derive(Default)]
pub struct ServiceTransactionChecker {
contract: Mutex<Option<provider::Contract>>,
}

impl ServiceTransactionChecker {
/// Try to create instance, reading contract address from given chain client.
pub fn update_from_chain_client(&self, client: &MiningBlockChainClient) {
let mut contract = self.contract.lock();
if contract.is_none() {
*contract = client.registry_address(SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned())
.and_then(|contract_addr| {
trace!(target: "txqueue", "Configuring for service transaction checker contract from {}", contract_addr);

Some(provider::Contract::new(contract_addr))
})
}
}

/// Checks if service transaction can be appended to the transaction queue.
pub fn check(&self, client: &MiningBlockChainClient, tx: &SignedTransaction) -> Result<bool, String> {
debug_assert_eq!(tx.gas_price, U256::zero());

if let Some(ref contract) = *self.contract.lock() {
let do_call = |a, d| client.call_contract(a, d);
contract.certified(&do_call, &tx.sender())
} else {
Err("contract is not configured".to_owned())
}
}
}

mod provider {
// Autogenerated from JSON contract definition using Rust contract convertor.
// Command line: --jsonabi=SimpleCertifier.abi --explicit-do-call
#![allow(unused_imports)]
use std::string::String;
use std::result::Result;
use std::fmt;
use {util, ethabi};
use util::{FixedHash, Uint};

pub struct Contract {
contract: ethabi::Contract,
address: util::Address,
}
impl Contract {
pub fn new(address: util::Address) -> Self
{
Contract {
contract: ethabi::Contract::new(ethabi::Interface::load(b"[{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certify\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"revoke\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"delegate\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getUint\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setDelegate\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certified\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"get\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"type\":\"function\"}]").expect("JSON is autogenerated; qed")),
address: address,
}
}
fn as_string<T: fmt::Debug>(e: T) -> String { format!("{:?}", e) }

/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_owner<F>(&self, do_call: &F, _new: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("setOwner".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_new.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;

Ok(())
}

/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn certify<F>(&self, do_call: &F, _who: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("certify".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;

Ok(())
}

/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_address<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::Address, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("getAddress".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}

/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn revoke<F>(&self, do_call: &F, _who: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("revoke".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;

Ok(())
}

/// Auto-generated from: `{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn owner<F>(&self, do_call: &F) -> Result<util::Address, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("owner".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}

/// Auto-generated from: `{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn delegate<F>(&self, do_call: &F) -> Result<util::Address, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("delegate".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}

/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_uint<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::U256, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("getUint".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) }))
}

/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_delegate<F>(&self, do_call: &F, _new: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("setDelegate".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_new.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;

Ok(())
}

/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn certified<F>(&self, do_call: &F, _who: &util::Address) -> Result<bool, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("certified".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}

/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::H256, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("get".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) }))
}
}
}
File diff suppressed because it is too large
@@ -90,6 +90,7 @@ tx_time_limit = 100 #ms
extra_data = "Parity"
remove_solved = false
notify_work = ["http://localhost:3001"]
refuse_service_transactions = false

[footprint]
tracing = "auto"
@@ -230,6 +230,8 @@ usage! {
or |c: &Config| otry!(c.mining).remove_solved.clone(),
flag_notify_work: Option<String> = None,
or |c: &Config| otry!(c.mining).notify_work.clone().map(|vec| Some(vec.join(","))),
flag_refuse_service_transactions: bool = false,
or |c: &Config| otry!(c.mining).refuse_service_transactions.clone(),

// -- Footprint Options
flag_tracing: String = "auto",
@@ -416,6 +418,7 @@ struct Mining {
tx_queue_ban_time: Option<u16>,
remove_solved: Option<bool>,
notify_work: Option<Vec<String>>,
refuse_service_transactions: Option<bool>,
}

#[derive(Default, Debug, PartialEq, RustcDecodable)]
@@ -633,6 +636,7 @@ mod tests {
flag_tx_queue_ban_time: 180u16,
flag_remove_solved: false,
flag_notify_work: Some("http://localhost:3001".into()),
flag_refuse_service_transactions: false,

// -- Footprint Options
flag_tracing: "auto".into(),
@@ -811,6 +815,7 @@ mod tests {
extra_data: None,
remove_solved: None,
notify_work: None,
refuse_service_transactions: None,
}),
footprint: Some(Footprint {
tracing: Some("on".into()),
@@ -22,361 +22,363 @@ Usage:
parity db kill [options]

Operating Options:
--mode MODE Set the operating mode. MODE can be one of:
last - Uses the last-used mode, active if none.
active - Parity continuously syncs the chain.
passive - Parity syncs initially, then sleeps and
wakes regularly to resync.
dark - Parity syncs only when the RPC is active.
offline - Parity doesn't sync. (default: {flag_mode}).
--mode-timeout SECS Specify the number of seconds before inactivity
timeout occurs when mode is dark or passive
(default: {flag_mode_timeout}).
--mode-alarm SECS Specify the number of seconds before auto sleep
reawake timeout occurs when mode is passive
(default: {flag_mode_alarm}).
--auto-update SET Set a releases set to automatically update and
install.
all - All updates in the our release track.
critical - Only consensus/security updates.
none - No updates will be auto-installed.
(default: {flag_auto_update}).
--release-track TRACK Set which release track we should use for updates.
stable - Stable releases.
beta - Beta releases.
nightly - Nightly releases (unstable).
testing - Testing releases (do not use).
current - Whatever track this executable was
released on (default: {flag_release_track}).
--no-download Normally new releases will be downloaded ready for
updating. This disables it. Not recommended.
(default: {flag_no_download}).
--no-consensus Force the binary to run even if there are known
issues regarding consensus. Not recommended.
(default: {flag_no_consensus}).
--force-direct Run the originally installed version of Parity,
ignoring any updates that have since been installed.
--chain CHAIN Specify the blockchain type. CHAIN may be either a
JSON chain specification file or olympic, frontier,
homestead, mainnet, morden, ropsten, classic, expanse,
testnet or dev (default: {flag_chain}).
-d --base-path PATH Specify the base data storage path.
(default: {flag_base_path}).
--db-path PATH Specify the database directory path
(default: {flag_db_path}).
--keys-path PATH Specify the path for JSON key files to be found
(default: {flag_keys_path}).
--identity NAME Specify your node's name. (default: {flag_identity})

Account Options:
--unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution.
ACCOUNTS is a comma-delimited list of addresses.
Implies --no-ui. (default: {flag_unlock:?})
--password FILE Provide a file containing a password for unlocking
an account. Leading and trailing whitespace is trimmed.
(default: {flag_password:?})
--keys-iterations NUM Specify the number of iterations to use when
deriving key from the password (bigger is more
secure) (default: {flag_keys_iterations}).

UI Options:
--force-ui Enable Trusted UI WebSocket endpoint,
even when --unlock is in use. (default: ${flag_force_ui})
--no-ui Disable Trusted UI WebSocket endpoint.
(default: ${flag_no_ui})
--ui-port PORT Specify the port of Trusted UI server
(default: {flag_ui_port}).
--ui-interface IP Specify the hostname portion of the Trusted UI
server, IP should be an interface's IP address,
or local (default: {flag_ui_interface}).
--ui-path PATH Specify directory where Trusted UIs tokens should
be stored. (default: {flag_ui_path})
--ui-no-validation Disable Origin and Host headers validation for
Trusted UI. WARNING: INSECURE. Used only for
development. (default: {flag_ui_no_validation})

Networking Options:
--warp Enable syncing from the snapshot over the network. (default: {flag_warp})
--port PORT Override the port on which the node should listen
(default: {flag_port}).
--min-peers NUM Try to maintain at least NUM peers (default: {flag_min_peers}).
--max-peers NUM Allow up to NUM peers (default: {flag_max_peers}).
--snapshot-peers NUM Allow additional NUM peers for a snapshot sync
(default: {flag_snapshot_peers}).
--nat METHOD Specify method to use for determining public
address. Must be one of: any, none, upnp,
extip:<IP> (default: {flag_nat}).
--network-id INDEX Override the network identifier from the chain we
are on. (default: {flag_network_id:?})
--bootnodes NODES Override the bootnodes from our chain. NODES should
be comma-delimited enodes. (default: {flag_bootnodes:?})
--no-discovery Disable new peer discovery. (default: {flag_no_discovery})
--node-key KEY Specify node secret key, either as 64-character hex
string or input to SHA3 operation. (default: {flag_node_key:?})
--reserved-peers FILE Provide a file containing enodes, one per line.
These nodes will always have a reserved slot on top
of the normal maximum peers. (default: {flag_reserved_peers:?})
--reserved-only Connect only to reserved nodes. (default: {flag_reserved_only})
--allow-ips FILTER Filter outbound connections. Must be one of:
private - connect to private network IP addresses only;
public - connect to public network IP addresses only;
all - connect to any IP address.
(default: {flag_allow_ips})
--max-pending-peers NUM Allow up to NUM pending connections. (default: {flag_max_pending_peers})
--no-ancient-blocks Disable downloading old blocks after snapshot restoration
or warp sync. (default: {flag_no_ancient_blocks})

API and Console Options:
--no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc})
--jsonrpc-port PORT Specify the port portion of the JSONRPC API server
(default: {flag_jsonrpc_port}).
--jsonrpc-interface IP Specify the hostname portion of the JSONRPC API
server, IP should be an interface's IP address, or
all (all interfaces) or local (default: {flag_jsonrpc_interface}).
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses.
(default: {flag_jsonrpc_cors:?})
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC
interface. APIS is a comma-delimited list of API
name. Possible name are web3, eth, net, personal,
parity, parity_set, traces, rpc, parity_accounts.
(default: {flag_jsonrpc_apis}).
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it
is additional security against some attack
vectors. Special options: "all", "none",
(default: {flag_jsonrpc_hosts}).

--no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc})
--ipc-path PATH Specify custom path for JSON-RPC over IPC service
(default: {flag_ipc_path}).
--ipc-apis APIS Specify custom API set available via JSON-RPC over
IPC (default: {flag_ipc_apis}).

--no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps})
--dapps-port PORT Specify the port portion of the Dapps server
(default: {flag_dapps_port}).
--dapps-interface IP Specify the hostname portion of the Dapps
server, IP should be an interface's IP address,
or local (default: {flag_dapps_interface}).
--dapps-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it
is additional security against some attack
vectors. Special options: "all", "none",
(default: {flag_dapps_hosts}).
--dapps-user USERNAME Specify username for Dapps server. It will be
used in HTTP Basic Authentication Scheme.
If --dapps-pass is not specified you will be
asked for password on startup. (default: {flag_dapps_user:?})
--dapps-pass PASSWORD Specify password for Dapps server. Use only in
conjunction with --dapps-user. (default: {flag_dapps_pass:?})
--dapps-path PATH Specify directory where dapps should be installed.
(default: {flag_dapps_path})

Sealing/Mining Options:
--author ADDRESS Specify the block author (aka "coinbase") address
for sending block rewards from sealed blocks.
NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.
(default: {flag_author:?})
--engine-signer ADDRESS Specify the address which should be used to
sign consensus messages and issue blocks.
Relevant only to non-PoW chains.
(default: {flag_engine_signer:?})
--force-sealing Force the node to author new blocks as if it were
always sealing/mining.
(default: {flag_force_sealing})
--reseal-on-txs SET Specify which transactions should force the node
to reseal a block. SET is one of:
none - never reseal on new transactions;
own - reseal only on a new local transaction;
ext - reseal only on a new external transaction;
all - reseal on all new transactions
(default: {flag_reseal_on_txs}).
--reseal-min-period MS Specify the minimum time between reseals from
incoming transactions. MS is time measured in
milliseconds (default: {flag_reseal_min_period}).
--work-queue-size ITEMS Specify the number of historical work packages
which are kept cached lest a solution is found for
them later. High values take more memory but result
in fewer unusable solutions (default: {flag_work_queue_size}).
--tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas
a single transaction may have for it to be mined.
(default: {flag_tx_gas_limit:?})
--tx-time-limit MS Maximal time for processing single transaction.
If enabled senders/recipients/code of transactions
offending the limit will be banned from being included
in transaction queue for 180 seconds.
(default: {flag_tx_time_limit:?})
--relay-set SET Set of transactions to relay. SET may be:
cheap - Relay any transaction in the queue (this
may include invalid transactions);
strict - Relay only executed transactions (this
guarantees we don't relay invalid transactions, but
means we relay nothing if not mining);
lenient - Same as strict when mining, and cheap
when not (default: {flag_relay_set}).
--usd-per-tx USD Amount of USD to be paid for a basic transaction
(default: {flag_usd_per_tx}). The minimum gas price is set
accordingly.
--usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an
amount in USD, a web service or 'auto' to use each
web service in turn and fallback on the last known
good value (default: {flag_usd_per_eth}).
--price-update-period T T will be allowed to pass between each gas price
update. T may be daily, hourly, a number of seconds,
or a time string of the form "2 days", "30 minutes"
etc. (default: {flag_price_update_period}).
--gas-floor-target GAS Amount of gas per block to target when sealing a new
block (default: {flag_gas_floor_target}).
--gas-cap GAS A cap on how large we will raise the gas limit per
block due to transaction volume (default: {flag_gas_cap}).
--extra-data STRING Specify a custom extra-data for authored blocks, no
more than 32 characters. (default: {flag_extra_data:?})
--tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting
to be included in next block) (default: {flag_tx_queue_size}).
--tx-queue-gas LIMIT Maximum amount of total gas for external transactions in
the queue. LIMIT can be either an amount of gas or
'auto' or 'off'. 'auto' sets the limit to be 20x
the current block gas limit. (default: {flag_tx_queue_gas}).
--tx-queue-strategy S Prioritization strategy used to order transactions
in the queue. S may be:
gas - Prioritize txs with low gas limit;
gas_price - Prioritize txs with high gas price;
gas_factor - Prioritize txs using gas price
and gas limit ratio (default: {flag_tx_queue_strategy}).
--tx-queue-ban-count C Number of times maximal time for execution (--tx-time-limit)
can be exceeded before banning sender/recipient/code.
(default: {flag_tx_queue_ban_count})
--tx-queue-ban-time SEC Banning time (in seconds) for offenders of specified
execution time limit. Also number of offending actions
have to reach the threshold within that time.
(default: {flag_tx_queue_ban_time} seconds)
--remove-solved Move solved blocks from the work package queue
instead of cloning them. This gives a slightly
faster import speed, but means that extra solutions
submitted for the same work package will go unused.
(default: {flag_remove_solved})
--notify-work URLS URLs to which work package notifications are pushed.
URLS should be a comma-delimited list of HTTP URLs.
(default: {flag_notify_work:?})
--refuse-service-transactions Always refuse service transactions.
(default: {flag_refuse_service_transactions}).
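For context, a "service transaction" in this change is one with a zero gas price: the propagation code later in this diff partitions the pending set on `gas_price.is_zero()`. The sketch below shows the check that the new --refuse-service-transactions option gates; the function names are illustrative only and not part of the actual miner API.

    use util::{U256, Uint};

    // Sketch: a service transaction is identified purely by its zero gas price.
    fn is_service_transaction(gas_price: &U256) -> bool {
        gas_price.is_zero()
    }

    // Sketch: with --refuse-service-transactions enabled, such transactions are
    // rejected instead of being accepted into the queue.
    fn refuses(gas_price: &U256, refuse_service_transactions: bool) -> bool {
        refuse_service_transactions && is_service_transaction(gas_price)
    }
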
Footprint Options:
--tracing BOOL Indicates if full transaction tracing should be
enabled. Works only if client had been fully synced
with tracing enabled. BOOL may be one of auto, on,
off. auto uses last used value of this option (off
if it does not exist) (default: {flag_tracing}).
--pruning METHOD Configure pruning of the state/storage trie. METHOD
may be one of auto, archive, fast:
archive - keep all state trie data. No pruning.
fast - maintain journal overlay. Fast but 50MB used.
auto - use the method most recently synced or
default to fast if none synced (default: {flag_pruning}).
--pruning-history NUM Set a minimum number of recent states to keep when pruning
is active. (default: {flag_pruning_history}).
--pruning-memory MB The ideal amount of memory in megabytes to use to store
recent states. As many states as possible will be kept
within this limit, and at least --pruning-history states
will always be kept. (default: {flag_pruning_memory})
--cache-size-db MB Override database cache size (default: {flag_cache_size_db}).
--cache-size-blocks MB Specify the preferred size of the blockchain cache in
megabytes (default: {flag_cache_size_blocks}).
--cache-size-queue MB Specify the maximum size of memory to use for block
queue (default: {flag_cache_size_queue}).
--cache-size-state MB Specify the maximum size of memory to use for
the state cache (default: {flag_cache_size_state}).
--cache-size MB Set total amount of discretionary memory to use for
the entire system, overrides other cache and queue
options. (default: {flag_cache_size:?})
--fast-and-loose Disables DB WAL, which gives a significant speed up
but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
--db-compaction TYPE Database compaction type. TYPE may be one of:
ssd - suitable for SSDs and fast HDDs;
hdd - suitable for slow HDDs;
auto - determine automatically (default: {flag_db_compaction}).
--fat-db BOOL Build appropriate information to allow enumeration
of all accounts and storage keys. Doubles the size
of the state database. BOOL may be one of on, off
or auto. (default: {flag_fat_db})
--scale-verifiers Automatically scale amount of verifier threads based on
workload. Not guaranteed to be faster.
(default: {flag_scale_verifiers})
--num-verifiers INT Amount of verifier threads to use or to begin with, if verifier
auto-scaling is enabled. (default: {flag_num_verifiers:?})
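The interaction of --pruning-history and --pruning-memory described above boils down to "keep as many recent states as fit in the memory budget, but never fewer than the history minimum". A rough sketch of that rule, with all names hypothetical (the real pruning code is not part of this diff):

    use std::cmp::max;

    // Sketch: how many recent states to retain given the memory budget.
    // `avg_state_size_mb` stands in for whatever per-state cost estimate the
    // pruning code actually uses; it is not a real parameter of the client.
    fn states_to_keep(pruning_history: u64, pruning_memory_mb: u64, avg_state_size_mb: u64) -> u64 {
        let fitting = if avg_state_size_mb == 0 {
            u64::max_value()
        } else {
            pruning_memory_mb / avg_state_size_mb
        };
        // At least --pruning-history states are always kept.
        max(pruning_history, fitting)
    }
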
Import/Export Options:
--from BLOCK Export from block BLOCK, which may be an index or
hash (default: {flag_from}).
--to BLOCK Export to (including) block BLOCK, which may be an
index, hash or 'latest' (default: {flag_to}).
--format FORMAT For import/export in given format. FORMAT must be
one of 'hex' and 'binary'.
(default: {flag_format:?} = Import: auto, Export: binary)
--no-seal-check Skip block seal check. (default: {flag_no_seal_check})
--at BLOCK Export state at the given block, which may be an
index, hash, or 'latest'. (default: {flag_at})
--no-storage Don't export account storage. (default: {flag_no_storage})
--no-code Don't export account code. (default: {flag_no_code})
--min-balance WEI Don't export accounts with balance less than specified.
(default: {flag_min_balance:?})
--max-balance WEI Don't export accounts with balance greater than specified.
(default: {flag_max_balance:?})
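The --min-balance/--max-balance pair simply bounds which accounts the state export emits. A tiny sketch of that filter (the function name is illustrative; the export code itself is not shown in this diff):

    use util::U256;

    // Sketch: an account is exported only if its balance lies within [min, max].
    fn export_account(balance: U256, min_balance: U256, max_balance: U256) -> bool {
        balance >= min_balance && balance <= max_balance
    }
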
Snapshot Options:
--at BLOCK Take a snapshot at the given block, which may be an
index, hash, or 'latest'. Note that taking snapshots at
non-recent blocks will only work with --pruning archive
(default: {flag_at})
--no-periodic-snapshot Disable automated snapshots which usually occur once
every 10000 blocks. (default: {flag_no_periodic_snapshot})

Virtual Machine Options:
--jitvm Enable the JIT VM. (default: {flag_jitvm})

Legacy Options:
--geth Run in Geth-compatibility mode. Sets the IPC path
to be the same as Geth's. Overrides the --ipc-path
and --ipcpath options. Alters RPCs to reflect Geth
bugs. Includes the personal_ RPC by default.
--testnet Geth-compatible testnet mode. Equivalent to --chain
testnet --keys-path $HOME/parity/testnet-keys.
Overrides the --keys-path option.
--import-geth-keys Attempt to import keys from Geth client.
--datadir PATH Equivalent to --base-path PATH.
--networkid INDEX Equivalent to --network-id INDEX.
--peers NUM Equivalent to --min-peers NUM.
--nodekey KEY Equivalent to --node-key KEY.
--nodiscover Equivalent to --no-discovery.
-j --jsonrpc Does nothing; JSON-RPC is on by default now.
--jsonrpc-off Equivalent to --no-jsonrpc.
-w --webapp Does nothing; dapps server is on by default now.
--dapps-off Equivalent to --no-dapps.
--rpc Does nothing; JSON-RPC is on by default now.
--rpcaddr IP Equivalent to --jsonrpc-interface IP.
--rpcport PORT Equivalent to --jsonrpc-port PORT.
--rpcapi APIS Equivalent to --jsonrpc-apis APIS.
--rpccorsdomain URL Equivalent to --jsonrpc-cors URL.
--ipcdisable Equivalent to --no-ipc.
--ipc-off Equivalent to --no-ipc.
--ipcapi APIS Equivalent to --ipc-apis APIS.
--ipcpath PATH Equivalent to --ipc-path PATH.
--gasprice WEI Minimum amount of Wei per GAS to be paid for a
transaction to be accepted for mining. Overrides
--basic-tx-usd.
--etherbase ADDRESS Equivalent to --author ADDRESS.
--extradata STRING Equivalent to --extra-data STRING.
--cache MB Equivalent to --cache-size MB.

Internal Options:
--can-restart Executable will auto-restart if exiting with 69.

Miscellaneous Options:
-c --config CONFIG Specify a filename containing a configuration file.
(default: {flag_config})
-l --logging LOGGING Specify the logging level. Must conform to the same
format as RUST_LOG. (default: {flag_logging:?})
--log-file FILENAME Specify a filename into which logging should be
appended. (default: {flag_log_file:?})
--no-config Don't load a configuration file.
--no-color Don't use terminal color codes in output. (default: {flag_no_color})
-v --version Show information about version.
-h --help Show this screen.

@ -491,7 +491,8 @@ impl Configuration {
ban_duration: Duration::from_secs(self.args.flag_tx_queue_ban_time as u64),
},
None => Banning::Disabled,
}
},
refuse_service_transactions: self.args.flag_refuse_service_transactions,
};

Ok(options)

@ -66,6 +66,7 @@ fn miner_service(spec: &Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
reseal_min_period: Duration::from_secs(0),
work_queue_size: 50,
enable_resubmission: true,
refuse_service_transactions: false,
},
GasPricer::new_fixed(20_000_000_000u64.into()),
&spec,

@ -17,6 +17,8 @@
// Rust/Parity ABI struct autogenerator.
// By Gav Wood, 2016.

var fs = require('fs');

String.prototype.replaceAll = function(f, t) { return this.split(f).join(t); }
String.prototype.toSnake = function(){
return this.replace(/([A-Z])/g, function($1){return "_"+$1.toLowerCase();});
@ -24,6 +26,7 @@ String.prototype.toSnake = function(){

function makeContractFile(name, json, prefs) {
return `// Autogenerated from JSON contract definition using Rust contract convertor.
// Command line: ${process.argv.slice(2).join(' ')}
#![allow(unused_imports)]
use std::string::String;
use std::result::Result;
@ -39,14 +42,15 @@ function convertContract(name, json, prefs) {
return `${prefs._pub ? "pub " : ""}struct ${name} {
contract: ethabi::Contract,
address: util::Address,
do_call: Box<Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}+ 'static>,
${prefs._explicit_do_call ? "" : `do_call: Box<Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send${prefs._sync ? " + Sync " : ""}+ 'static>,`}
}
impl ${name} {
pub fn new<F>(address: util::Address, do_call: F) -> Self where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}+ 'static {
pub fn new${prefs._explicit_do_call ? "" : "<F>"}(address: util::Address${prefs._explicit_do_call ? "" : `", do_call: F"`}) -> Self
${prefs._explicit_do_call ? "" : `where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}+ 'static`} {
${name} {
contract: ethabi::Contract::new(ethabi::Interface::load(b"${JSON.stringify(json.filter(a => a.type == 'function')).replaceAll('"', '\\"')}").expect("JSON is autogenerated; qed")),
address: address,
do_call: Box::new(do_call),
${prefs._explicit_do_call ? "" : `do_call: Box::new(do_call),`}
}
}
fn as_string<T: fmt::Debug>(e: T) -> String { format!("{:?}", e) }
@ -205,6 +209,7 @@ function tokenExtract(expr, type, _prefs) {
}

function convertFunction(json, _prefs) {
let cprefs = _prefs || {};
let prefs = (_prefs || {})[json.name] || (_prefs || {})['_'] || {};
let snakeName = json.name.toSnake();
let params = json.inputs.map((x, i) => (x.name ? x.name.toSnake() : ("_" + (i + 1))) + ": " + mapType(x.name, x.type, prefs[x.name]));
@ -212,18 +217,35 @@ function convertFunction(json, _prefs) {
return `
/// Auto-generated from: \`${JSON.stringify(json)}\`
#[allow(dead_code)]
pub fn ${snakeName}(&self${params.length > 0 ? ', ' + params.join(", ") : ''}) -> Result<${returns}, String> {
pub fn ${snakeName}${cprefs._explicit_do_call ? "<F>" : ""}(&self${cprefs._explicit_do_call ? `, do_call: &F` : ""}${params.length > 0 ? ', ' + params.join(", ") : ''}) -> Result<${returns}, String>
${cprefs._explicit_do_call ? `where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}` : ""} {
let call = self.contract.function("${json.name}".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![${json.inputs.map((x, i) => convertToken(x.name ? x.name.toSnake() : ("_" + (i + 1)), x.type, prefs[x.name])).join(', ')}]
).map_err(Self::as_string)?;
${json.outputs.length > 0 ? 'let output = ' : ''}call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
${json.outputs.length > 0 ? 'let output = ' : ''}call.decode_output((${cprefs._explicit_do_call ? "" : "self."}do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
${json.outputs.length > 0 ? 'let mut result = output.into_iter().rev().collect::<Vec<_>>();' : ''}
Ok((${json.outputs.map((o, i) => tokenExtract('result.pop().ok_or("Invalid return arity")?', o.type, prefs[o.name])).join(', ')}))
}`;
}

// default preferences:
let prefs = {"_pub": true, "_": {"_client": {"string": true}, "_platform": {"string": true}}, "_sync": true};
// default contract json ABI
let jsonabi = [{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"}];

let out = makeContractFile("Contract", jsonabi, {"_pub": true, "_": {"_client": {"string": true}, "_platform": {"string": true}}, "_sync": true});
// parse command line options
for (let i = 1; i < process.argv.length; ++i) {
let arg = process.argv[i];
if (arg.indexOf("--jsonabi") == 0) {
jsonabi = arg.slice(10);
if (fs.existsSync(jsonabi)) {
jsonabi = JSON.parse(fs.readFileSync(jsonabi).toString());
}
} else if (arg.indexOf("--explicit-do-call") == 0) {
prefs._explicit_do_call = true;
}
}

let out = makeContractFile("Contract", jsonabi, prefs);
console.log(`${out}`);

@ -96,6 +96,7 @@ use ethcore::header::{BlockNumber, Header as BlockHeader};
use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo, BlockImportError, BlockQueueInfo};
use ethcore::error::*;
use ethcore::snapshot::{ManifestData, RestorationStatus};
use ethcore::transaction::PendingTransaction;
use sync_io::SyncIo;
use time;
use super::SyncConfig;
@ -1949,7 +1950,46 @@ impl ChainSync {
return 0;
}

let all_transactions_hashes = transactions.iter().map(|tx| tx.transaction.hash()).collect::<HashSet<H256>>();
let (transactions, service_transactions): (Vec<_>, Vec<_>) = transactions.into_iter()
.partition(|tx| !tx.transaction.gas_price.is_zero());

// usual transactions could be propagated to all peers
let mut affected_peers = HashSet::new();
if !transactions.is_empty() {
let peers = self.select_peers_for_transactions(|_| true);
affected_peers = self.propagate_transactions_to_peers(io, peers, transactions);
}

// most of times service_transactions will be empty
// => there's no need to merge packets
if !service_transactions.is_empty() {
let service_transactions_peers = self.select_peers_for_transactions(|peer_id| accepts_service_transaction(&io.peer_info(*peer_id)));
let service_transactions_affected_peers = self.propagate_transactions_to_peers(io, service_transactions_peers, service_transactions);
affected_peers.extend(&service_transactions_affected_peers);
}

affected_peers.len()
}

fn select_peers_for_transactions<F>(&self, filter: F) -> Vec<PeerId>
where F: Fn(&PeerId) -> bool {
// sqrt(x)/x scaled to max u32
let fraction = (self.peers.len() as f64).powf(-0.5).mul(u32::max_value() as f64).round() as u32;
let small = self.peers.len() < MIN_PEERS_PROPAGATION;

let mut random = random::new();
self.peers.keys()
.cloned()
.filter(filter)
.filter(|_| small || random.next_u32() < fraction)
.take(MAX_PEERS_PROPAGATION)
.collect()
}
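The fraction above encodes the "propagate to roughly sqrt(n) of the n connected peers" rule: each peer passes the random filter with probability 1/sqrt(n), so the expected number of selected peers is n * 1/sqrt(n) = sqrt(n), subject to the MIN_PEERS_PROPAGATION and MAX_PEERS_PROPAGATION clamps. A small illustrative check of the scaling (not part of the change):

    // Sketch: probability that a single peer passes the random filter, for n peers.
    fn selection_probability(n: usize) -> f64 {
        (n as f64).powf(-0.5)
    }

    // With 16 peers each one is picked with probability 0.25, so on average
    // 16 * 0.25 = 4 = sqrt(16) peers receive the transactions packet.
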
fn propagate_transactions_to_peers(&mut self, io: &mut SyncIo, peers: Vec<PeerId>, transactions: Vec<PendingTransaction>) -> HashSet<PeerId> {
let all_transactions_hashes = transactions.iter()
.map(|tx| tx.transaction.hash())
.collect::<HashSet<H256>>();
let all_transactions_rlp = {
let mut packet = RlpStream::new_list(transactions.len());
for tx in &transactions { packet.append(&tx.transaction); }
@ -1960,26 +2000,24 @@ impl ChainSync {
self.transactions_stats.retain(&all_transactions_hashes);

// sqrt(x)/x scaled to max u32
let fraction = (self.peers.len() as f64).powf(-0.5).mul(u32::max_value() as f64).round() as u32;
let small = self.peers.len() < MIN_PEERS_PROPAGATION;
let block_number = io.chain().chain_info().best_block_number;

let mut random = random::new();
let lucky_peers = {
let stats = &mut self.transactions_stats;
self.peers.iter_mut()
.filter(|_| small || random.next_u32() < fraction)
.take(MAX_PEERS_PROPAGATION)
.filter_map(|(peer_id, mut peer_info)| {
peers.into_iter()
.filter_map(|peer_id| {
let stats = &mut self.transactions_stats;
let peer_info = self.peers.get_mut(&peer_id)
.expect("peer_id is form peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed");

// Send all transactions
if peer_info.last_sent_transactions.is_empty() {
// update stats
for hash in &all_transactions_hashes {
let id = io.peer_session_info(*peer_id).and_then(|info| info.id);
let id = io.peer_session_info(peer_id).and_then(|info| info.id);
stats.propagated(*hash, id, block_number);
}
peer_info.last_sent_transactions = all_transactions_hashes.clone();
return Some((*peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone()));
return Some((peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone()));
}

// Get hashes of all transactions to send to this peer
@ -1997,7 +2035,7 @@ impl ChainSync {
if to_send.contains(&tx.transaction.hash()) {
packet.append(&tx.transaction);
// update stats
let id = io.peer_session_info(*peer_id).and_then(|info| info.id);
let id = io.peer_session_info(peer_id).and_then(|info| info.id);
stats.propagated(tx.transaction.hash(), id, block_number);
}
}
@ -2007,22 +2045,25 @@ impl ChainSync {
.chain(&to_send)
.cloned()
.collect();
Some((*peer_id, to_send.len(), packet.out()))
Some((peer_id, to_send.len(), packet.out()))
})
.collect::<Vec<_>>()
};

// Send RLPs
let peers = lucky_peers.len();
if peers > 0 {
let mut peers = HashSet::new();
if lucky_peers.len() > 0 {
let mut max_sent = 0;
let lucky_peers_len = lucky_peers.len();
for (peer_id, sent, rlp) in lucky_peers {
peers.insert(peer_id);
self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp);
trace!(target: "sync", "{:02} <- Transactions ({} entries)", peer_id, sent);
max_sent = max(max_sent, sent);
}
debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, peers);
debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, lucky_peers_len);
}

peers
}

@ -2109,12 +2150,30 @@ impl ChainSync {
}
}

/// Checks if peer is able to process service transactions
fn accepts_service_transaction(client_id: &str) -> bool {
// Parity versions starting from this will accept service-transactions
const SERVICE_TRANSACTIONS_VERSION: (u32, u32) = (1u32, 6u32);
// Parity client string prefix
const PARITY_CLIENT_ID_PREFIX: &'static str = "Parity/v";

if !client_id.starts_with(PARITY_CLIENT_ID_PREFIX) {
return false;
}
let ver: Vec<u32> = client_id[PARITY_CLIENT_ID_PREFIX.len()..].split('.')
.take(2)
.filter_map(|s| s.parse().ok())
.collect();
ver.len() == 2 && (ver[0] > SERVICE_TRANSACTIONS_VERSION.0 || (ver[0] == SERVICE_TRANSACTIONS_VERSION.0 && ver[1] >= SERVICE_TRANSACTIONS_VERSION.1))
}
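Given the parsing above, only Parity clients reporting version 1.6 or later match; everything else, including client ids that do not start with "Parity/v", is rejected. The expectations below mirror the peer client ids used by the new test further down and are shown only as a hedged illustration of the intended behaviour:

    // Illustrative expectations for accepts_service_transaction:
    assert!(accepts_service_transaction("Parity/v1.6"));
    assert!(accepts_service_transaction("Parity/v1.7.3-ABCDEFGH"));
    assert!(!accepts_service_transaction("Parity/v1.5"));
    assert!(!accepts_service_transaction("Geth"));
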
#[cfg(test)]
mod tests {
use std::collections::{HashSet, VecDeque};
use network::PeerId;
use tests::helpers::*;
use tests::snapshot::TestSnapshotService;
use util::{U256, Address, RwLock};
use util::{Uint, U256, Address, RwLock};
use util::sha3::Hashable;
use util::hash::{H256, FixedHash};
use util::bytes::Bytes;
@ -2351,7 +2410,12 @@ mod tests {

fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync {
let mut sync = ChainSync::new(SyncConfig::default(), client);
sync.peers.insert(0,
insert_dummy_peer(&mut sync, 0, peer_latest_hash);
sync
}

fn insert_dummy_peer(sync: &mut ChainSync, peer_id: PeerId, peer_latest_hash: H256) {
sync.peers.insert(peer_id,
PeerInfo {
protocol_version: 0,
genesis: H256::zero(),
@ -2370,7 +2434,7 @@ mod tests {
asking_snapshot_data: None,
block_set: None,
});
sync
}

#[test]
@ -2622,6 +2686,79 @@ mod tests {
assert_eq!(stats.len(), 1, "Should maintain stats for single transaction.")
}

#[test]
fn should_propagate_service_transaction_to_selected_peers_only() {
let mut client = TestBlockChainClient::new();
client.insert_transaction_with_gas_price_to_queue(U256::zero());
let block_hash = client.block_hash_delta_minus(1);
let mut sync = ChainSync::new(SyncConfig::default(), &client);
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &queue, None);

// when peer#1 is Geth
insert_dummy_peer(&mut sync, 1, block_hash);
io.peers_info.insert(1, "Geth".to_owned());
// and peer#2 is Parity, accepting service transactions
insert_dummy_peer(&mut sync, 2, block_hash);
io.peers_info.insert(2, "Parity/v1.6".to_owned());
// and peer#3 is Parity, discarding service transactions
insert_dummy_peer(&mut sync, 3, block_hash);
io.peers_info.insert(3, "Parity/v1.5".to_owned());
// and peer#4 is Parity, accepting service transactions
insert_dummy_peer(&mut sync, 4, block_hash);
io.peers_info.insert(4, "Parity/v1.7.3-ABCDEFGH".to_owned());

// and new service transaction is propagated to peers
sync.propagate_new_transactions(&mut io);

// peer#2 && peer#4 are receiving service transaction
assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET
assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 4)); // TRANSACTIONS_PACKET
assert_eq!(io.packets.len(), 2);
}

#[test]
fn should_propagate_service_transaction_is_sent_as_separate_message() {
let mut client = TestBlockChainClient::new();
let tx1_hash = client.insert_transaction_to_queue();
let tx2_hash = client.insert_transaction_with_gas_price_to_queue(U256::zero());
let block_hash = client.block_hash_delta_minus(1);
let mut sync = ChainSync::new(SyncConfig::default(), &client);
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &queue, None);

// when peer#1 is Parity, accepting service transactions
insert_dummy_peer(&mut sync, 1, block_hash);
io.peers_info.insert(1, "Parity/v1.6".to_owned());

// and service + non-service transactions are propagated to peers
sync.propagate_new_transactions(&mut io);

// two separate packets for peer are queued:
// 1) with non-service-transaction
// 2) with service transaction
let sent_transactions: Vec<UnverifiedTransaction> = io.packets.iter()
.filter_map(|p| {
if p.packet_id != 0x02 || p.recipient != 1 { // TRANSACTIONS_PACKET
return None;
}

let rlp = UntrustedRlp::new(&*p.data);
let item_count = rlp.item_count();
if item_count != 1 {
return None;
}

rlp.at(0).ok().and_then(|r| r.as_val().ok())
})
.collect();
assert_eq!(sent_transactions.len(), 2);
assert!(sent_transactions.iter().any(|tx| tx.hash() == tx1_hash));
assert!(sent_transactions.iter().any(|tx| tx.hash() == tx2_hash));
}

#[test]
fn handles_peer_new_block_malformed() {
let mut client = TestBlockChainClient::new();

@ -50,6 +50,7 @@ pub struct TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
pub sender: Option<PeerId>,
pub to_disconnect: HashSet<PeerId>,
pub packets: Vec<TestPacket>,
pub peers_info: HashMap<PeerId, String>,
overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}

@ -63,6 +64,7 @@ impl<'p, C> TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
to_disconnect: HashSet::new(),
overlay: RwLock::new(HashMap::new()),
packets: Vec::new(),
peers_info: HashMap::new(),
}
}
}
@ -112,6 +114,12 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
&*self.chain
}

fn peer_info(&self, peer_id: PeerId) -> String {
self.peers_info.get(&peer_id)
.cloned()
.unwrap_or_else(|| peer_id.to_string())
}

fn snapshot_service(&self) -> &SnapshotService {
self.snapshot_service
}