Integration with zgp whitelist contract (#4215)
* zgp-transactions checker
* polishing
* rename + refactor
* refuse-service-transactions cl option
* fixed tests compilation
parent 220084d77d
commit 092e24b9f2
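The refactor running through the hunks below replaces the pair of closures (`account_details`, `gas_estimator`) previously threaded into the transaction queue with a single details-provider object that can also answer whether a service transaction (per the checker's debug assertion, one with zero gas price) is acceptable. A minimal, self-contained sketch of that shape, using simplified stand-in names (`DetailsProvider`, `DummyProvider`, string-typed addresses and transactions) rather than the actual Parity types:

```rust
// Illustrative sketch only: simplified stand-ins for AccountDetails and
// the TransactionQueueDetailsProvider trait introduced by this commit.
struct AccountDetails {
    nonce: u64,
    balance: u64,
}

trait DetailsProvider {
    fn fetch_account(&self, sender: &str) -> AccountDetails;
    fn estimate_gas_required(&self, tx: &str) -> u64;
    fn is_service_transaction_acceptable(&self, tx: &str) -> Result<bool, String>;
}

struct DummyProvider;

impl DetailsProvider for DummyProvider {
    fn fetch_account(&self, _sender: &str) -> AccountDetails {
        AccountDetails { nonce: 0, balance: u64::MAX }
    }
    fn estimate_gas_required(&self, _tx: &str) -> u64 {
        21_000
    }
    fn is_service_transaction_acceptable(&self, _tx: &str) -> Result<bool, String> {
        Err("contract is not configured".to_owned())
    }
}

// What used to be `add(tx, ..., &account_details_closure, &gas_estimator_closure)`
// becomes `add(tx, ..., &details_provider)`.
fn add(provider: &dyn DetailsProvider, tx: &str, gas_price: u64) -> Result<(), String> {
    let details = provider.fetch_account("0xsender");
    let _gas = provider.estimate_gas_required(tx);
    if gas_price == 0 {
        // zero-gas-price ("service") transactions additionally go through the whitelist check
        provider.is_service_transaction_acceptable(tx)?;
    }
    let _ = details.nonce;
    Ok(())
}

fn main() {
    let provider = DummyProvider;
    assert!(add(&provider, "raw tx", 20_000_000_000).is_ok());
    assert!(add(&provider, "raw tx", 0).is_err()); // refused: no whitelist contract configured
}
```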
@@ -308,22 +308,29 @@ impl TestBlockChainClient {
 		}
 	}
 
-	/// Inserts a transaction to miners transactions queue.
-	pub fn insert_transaction_to_queue(&self) {
+	/// Inserts a transaction with given gas price to miners transactions queue.
+	pub fn insert_transaction_with_gas_price_to_queue(&self, gas_price: U256) -> H256 {
 		let keypair = Random.generate().unwrap();
 		let tx = Transaction {
 			action: Action::Create,
 			value: U256::from(100),
 			data: "3331600055".from_hex().unwrap(),
 			gas: U256::from(100_000),
-			gas_price: U256::from(20_000_000_000u64),
+			gas_price: gas_price,
 			nonce: U256::zero()
 		};
 		let signed_tx = tx.sign(keypair.secret(), None);
-		self.set_balance(signed_tx.sender(), U256::from(10_000_000_000_000_000_000u64));
+		self.set_balance(signed_tx.sender(), 10_000_000_000_000_000_000u64.into());
+		let hash = signed_tx.hash();
 		let res = self.miner.import_external_transactions(self, vec![signed_tx.into()]);
 		let res = res.into_iter().next().unwrap().expect("Successful import");
 		assert_eq!(res, TransactionImportResult::Current);
+		hash
+	}
+
+	/// Inserts a transaction to miners transactions queue.
+	pub fn insert_transaction_to_queue(&self) -> H256 {
+		self.insert_transaction_with_gas_price_to_queue(U256::from(20_000_000_000u64))
 	}
 
 	/// Set reported history size.
@@ -22,7 +22,7 @@ use std::ops::{Deref, DerefMut};
 use std::cell::Cell;
 use transaction::{SignedTransaction, Action};
 use transient_hashmap::TransientHashMap;
-use miner::{TransactionQueue, TransactionImportResult, TransactionOrigin, AccountDetails};
+use miner::{TransactionQueue, TransactionQueueDetailsProvider, TransactionImportResult, TransactionOrigin};
 use miner::transaction_queue::QueuingInstant;
 use error::{Error, TransactionError};
 use util::{Uint, U256, H256, Address, Hashable};
@@ -76,16 +76,12 @@ impl BanningTransactionQueue {
 
 	/// Add to the queue taking bans into consideration.
 	/// May reject transaction because of the banlist.
-	pub fn add_with_banlist<F, G>(
+	pub fn add_with_banlist(
 		&mut self,
 		transaction: SignedTransaction,
 		time: QueuingInstant,
-		account_details: &F,
-		gas_estimator: &G,
-	) -> Result<TransactionImportResult, Error> where
-		F: Fn(&Address) -> AccountDetails,
-		G: Fn(&SignedTransaction) -> U256,
-	{
+		details_provider: &TransactionQueueDetailsProvider,
+	) -> Result<TransactionImportResult, Error> {
 		if let Threshold::BanAfter(threshold) = self.ban_threshold {
 			// NOTE In all checks use direct query to avoid increasing ban timeout.
 
@@ -116,7 +112,7 @@ impl BanningTransactionQueue {
 				}
 			}
 		}
-		self.queue.add(transaction, TransactionOrigin::External, time, None, account_details, gas_estimator)
+		self.queue.add(transaction, TransactionOrigin::External, time, None, details_provider)
 	}
 
 	/// Ban transaction with given hash.
@@ -219,22 +215,16 @@ mod tests {
 	use transaction::{Transaction, SignedTransaction, Action};
 	use error::{Error, TransactionError};
 	use client::TransactionImportResult;
-	use miner::{TransactionQueue, TransactionOrigin, AccountDetails};
+	use miner::{TransactionQueue, TransactionOrigin};
 	use util::{Uint, U256, Address, FromHex, Hashable};
+	use miner::transaction_queue::test::DummyTransactionDetailsProvider;
 
 	fn queue() -> BanningTransactionQueue {
 		BanningTransactionQueue::new(TransactionQueue::default(), Threshold::BanAfter(1), Duration::from_secs(180))
 	}
 
-	fn default_account_details(_address: &Address) -> AccountDetails {
-		AccountDetails {
-			nonce: U256::zero(),
-			balance: !U256::zero(),
-		}
-	}
-
-	fn gas_required(_tx: &SignedTransaction) -> U256 {
-		0.into()
-	}
+	fn default_tx_provider() -> DummyTransactionDetailsProvider {
+		DummyTransactionDetailsProvider::default().with_account_nonce(U256::zero())
+	}
 
 	fn transaction(action: Action) -> SignedTransaction {
@@ -264,7 +254,7 @@ mod tests {
 		let mut txq = queue();
 
 		// when
-		txq.queue().add(tx, TransactionOrigin::External, 0, None, &default_account_details, &gas_required).unwrap();
+		txq.queue().add(tx, TransactionOrigin::External, 0, None, &default_tx_provider()).unwrap();
 
 		// then
 		// should also deref to queue
@@ -280,12 +270,12 @@ mod tests {
 		let banlist1 = txq.ban_sender(tx.sender());
 		assert!(!banlist1, "Threshold not reached yet.");
 		// Insert once
-		let import1 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required).unwrap();
+		let import1 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider()).unwrap();
 		assert_eq!(import1, TransactionImportResult::Current);
 
 		// when
 		let banlist2 = txq.ban_sender(tx.sender());
-		let import2 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required);
+		let import2 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider());
 
 		// then
 		assert!(banlist2, "Threshold should be reached - banned.");
@@ -304,12 +294,12 @@ mod tests {
 		let banlist1 = txq.ban_recipient(recipient);
 		assert!(!banlist1, "Threshold not reached yet.");
 		// Insert once
-		let import1 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required).unwrap();
+		let import1 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider()).unwrap();
 		assert_eq!(import1, TransactionImportResult::Current);
 
 		// when
 		let banlist2 = txq.ban_recipient(recipient);
-		let import2 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required);
+		let import2 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider());
 
 		// then
 		assert!(banlist2, "Threshold should be reached - banned.");
@@ -326,12 +316,12 @@ mod tests {
 		let banlist1 = txq.ban_codehash(codehash);
 		assert!(!banlist1, "Threshold not reached yet.");
 		// Insert once
-		let import1 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required).unwrap();
+		let import1 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider()).unwrap();
 		assert_eq!(import1, TransactionImportResult::Current);
 
 		// when
 		let banlist2 = txq.ban_codehash(codehash);
-		let import2 = txq.add_with_banlist(tx.clone(), 0, &default_account_details, &gas_required);
+		let import2 = txq.add_with_banlist(tx.clone(), 0, &default_tx_provider());
 
 		// then
 		assert!(banlist2, "Threshold should be reached - banned.");
@@ -29,11 +29,13 @@ use transaction::{Action, UnverifiedTransaction, PendingTransaction, SignedTrans
 use receipt::{Receipt, RichReceipt};
 use spec::Spec;
 use engines::{Engine, Seal};
-use miner::{MinerService, MinerStatus, TransactionQueue, PrioritizationStrategy, AccountDetails, TransactionOrigin};
+use miner::{MinerService, MinerStatus, TransactionQueue, TransactionQueueDetailsProvider, PrioritizationStrategy,
+	AccountDetails, TransactionOrigin};
 use miner::banning_queue::{BanningTransactionQueue, Threshold};
 use miner::work_notify::WorkPoster;
 use miner::price_info::PriceInfo;
 use miner::local_transactions::{Status as LocalTransactionStatus};
+use miner::service_transaction_checker::ServiceTransactionChecker;
 use header::BlockNumber;
 
 /// Different possible definitions for pending transaction set.
@@ -102,8 +104,10 @@ pub struct MinerOptions {
 	pub enable_resubmission: bool,
 	/// Global gas limit for all transaction in the queue except for local and retracted.
 	pub tx_queue_gas_limit: GasLimit,
-	/// Banning settings
+	/// Banning settings.
 	pub tx_queue_banning: Banning,
+	/// Do we refuse to accept service transactions even if sender is certified.
+	pub refuse_service_transactions: bool,
 }
 
 impl Default for MinerOptions {
@@ -122,6 +126,7 @@ impl Default for MinerOptions {
 			work_queue_size: 20,
 			enable_resubmission: true,
 			tx_queue_banning: Banning::Disabled,
+			refuse_service_transactions: false,
 		}
 	}
 }
@@ -221,6 +226,7 @@ pub struct Miner {
 	accounts: Option<Arc<AccountProvider>>,
 	work_poster: Option<WorkPoster>,
 	gas_pricer: Mutex<GasPricer>,
+	service_transaction_action: ServiceTransactionAction,
 }
 
 impl Miner {
@@ -244,6 +250,10 @@ impl Miner {
 				ban_duration,
 			),
 		};
+		let service_transaction_action = match options.refuse_service_transactions {
+			true => ServiceTransactionAction::Refuse,
+			false => ServiceTransactionAction::Check(ServiceTransactionChecker::default()),
+		};
 		Miner {
 			transaction_queue: Arc::new(Mutex::new(txq)),
 			next_allowed_reseal: Mutex::new(Instant::now()),
@@ -263,6 +273,7 @@ impl Miner {
 			engine: spec.engine.clone(),
 			work_poster: work_poster,
 			gas_pricer: Mutex::new(gas_pricer),
+			service_transaction_action: service_transaction_action,
 		}
 	}
 
@@ -526,8 +537,8 @@ impl Miner {
 		}
 	}
 
-	fn update_gas_limit(&self, chain: &MiningBlockChainClient) {
-		let gas_limit = chain.best_block_header().gas_limit();
+	fn update_gas_limit(&self, client: &MiningBlockChainClient) {
+		let gas_limit = client.best_block_header().gas_limit();
 		let mut queue = self.transaction_queue.lock();
 		queue.set_gas_limit(gas_limit);
 		if let GasLimit::Auto = self.options.tx_queue_gas_limit {
@@ -537,7 +548,7 @@ impl Miner {
 	}
 
 	/// Returns true if we had to prepare new pending block.
-	fn prepare_work_sealing(&self, chain: &MiningBlockChainClient) -> bool {
+	fn prepare_work_sealing(&self, client: &MiningBlockChainClient) -> bool {
 		trace!(target: "miner", "prepare_work_sealing: entering");
 		let prepare_new = {
 			let mut sealing_work = self.sealing_work.lock();
|
|||||||
// | NOTE Code below requires transaction_queue and sealing_work locks. |
|
// | NOTE Code below requires transaction_queue and sealing_work locks. |
|
||||||
// | Make sure to release the locks before calling that method. |
|
// | Make sure to release the locks before calling that method. |
|
||||||
// --------------------------------------------------------------------------
|
// --------------------------------------------------------------------------
|
||||||
let (block, original_work_hash) = self.prepare_block(chain);
|
let (block, original_work_hash) = self.prepare_block(client);
|
||||||
self.prepare_work(block, original_work_hash);
|
self.prepare_work(block, original_work_hash);
|
||||||
}
|
}
|
||||||
let mut sealing_block_last_request = self.sealing_block_last_request.lock();
|
let mut sealing_block_last_request = self.sealing_block_last_request.lock();
|
||||||
let best_number = chain.chain_info().best_block_number;
|
let best_number = client.chain_info().best_block_number;
|
||||||
if *sealing_block_last_request != best_number {
|
if *sealing_block_last_request != best_number {
|
||||||
trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
|
trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
|
||||||
*sealing_block_last_request = best_number;
|
*sealing_block_last_request = best_number;
|
||||||
@ -571,31 +582,23 @@ impl Miner {
|
|||||||
|
|
||||||
fn add_transactions_to_queue(
|
fn add_transactions_to_queue(
|
||||||
&self,
|
&self,
|
||||||
chain: &MiningBlockChainClient,
|
client: &MiningBlockChainClient,
|
||||||
transactions: Vec<UnverifiedTransaction>,
|
transactions: Vec<UnverifiedTransaction>,
|
||||||
default_origin: TransactionOrigin,
|
default_origin: TransactionOrigin,
|
||||||
min_block: Option<BlockNumber>,
|
min_block: Option<BlockNumber>,
|
||||||
transaction_queue: &mut BanningTransactionQueue,
|
transaction_queue: &mut BanningTransactionQueue,
|
||||||
) -> Vec<Result<TransactionImportResult, Error>> {
|
) -> Vec<Result<TransactionImportResult, Error>> {
|
||||||
|
|
||||||
let fetch_account = |a: &Address| AccountDetails {
|
|
||||||
nonce: chain.latest_nonce(a),
|
|
||||||
balance: chain.latest_balance(a),
|
|
||||||
};
|
|
||||||
|
|
||||||
let accounts = self.accounts.as_ref()
|
let accounts = self.accounts.as_ref()
|
||||||
.and_then(|provider| provider.accounts().ok())
|
.and_then(|provider| provider.accounts().ok())
|
||||||
.map(|accounts| accounts.into_iter().collect::<HashSet<_>>());
|
.map(|accounts| accounts.into_iter().collect::<HashSet<_>>());
|
||||||
|
|
||||||
let schedule = chain.latest_schedule();
|
let best_block_header = client.best_block_header().decode();
|
||||||
let gas_required = |tx: &SignedTransaction| tx.gas_required(&schedule).into();
|
let insertion_time = client.chain_info().best_block_number;
|
||||||
let best_block_header = chain.best_block_header().decode();
|
|
||||||
let insertion_time = chain.chain_info().best_block_number;
|
|
||||||
|
|
||||||
transactions.into_iter()
|
transactions.into_iter()
|
||||||
.map(|tx| {
|
.map(|tx| {
|
||||||
let hash = tx.hash();
|
let hash = tx.hash();
|
||||||
if chain.transaction_block(TransactionId::Hash(hash)).is_some() {
|
if client.transaction_block(TransactionId::Hash(hash)).is_some() {
|
||||||
debug!(target: "miner", "Rejected tx {:?}: already in the blockchain", hash);
|
debug!(target: "miner", "Rejected tx {:?}: already in the blockchain", hash);
|
||||||
return Err(Error::Transaction(TransactionError::AlreadyImported));
|
return Err(Error::Transaction(TransactionError::AlreadyImported));
|
||||||
}
|
}
|
||||||
@ -614,13 +617,17 @@ impl Miner {
|
|||||||
}
|
}
|
||||||
}).unwrap_or(default_origin);
|
}).unwrap_or(default_origin);
|
||||||
|
|
||||||
|
// try to install service transaction checker before appending transactions
|
||||||
|
self.service_transaction_action.update_from_chain_client(client);
|
||||||
|
|
||||||
|
let details_provider = TransactionDetailsProvider::new(client, &self.service_transaction_action);
|
||||||
match origin {
|
match origin {
|
||||||
TransactionOrigin::Local | TransactionOrigin::RetractedBlock => {
|
TransactionOrigin::Local | TransactionOrigin::RetractedBlock => {
|
||||||
transaction_queue.add(transaction, origin, insertion_time, min_block, &fetch_account, &gas_required)
|
transaction_queue.add(transaction, origin, insertion_time, min_block, &details_provider)
|
||||||
},
|
},
|
||||||
TransactionOrigin::External => {
|
TransactionOrigin::External => {
|
||||||
transaction_queue.add_with_banlist(transaction, insertion_time, &fetch_account, &gas_required)
|
transaction_queue.add_with_banlist(transaction, insertion_time, &details_provider)
|
||||||
}
|
},
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -1158,6 +1165,60 @@ impl MinerService for Miner {
 	}
 }
 
+/// Action when service transaction is received
+enum ServiceTransactionAction {
+	/// Refuse service transaction immediately
+	Refuse,
+	/// Accept if sender is certified to send service transactions
+	Check(ServiceTransactionChecker),
+}
+
+impl ServiceTransactionAction {
+	pub fn update_from_chain_client(&self, client: &MiningBlockChainClient) {
+		if let ServiceTransactionAction::Check(ref checker) = *self {
+			checker.update_from_chain_client(client);
+		}
+	}
+
+	pub fn check(&self, client: &MiningBlockChainClient, tx: &SignedTransaction) -> Result<bool, String> {
+		match *self {
+			ServiceTransactionAction::Refuse => Err("configured to refuse service transactions".to_owned()),
+			ServiceTransactionAction::Check(ref checker) => checker.check(client, tx),
+		}
+	}
+}
+
+struct TransactionDetailsProvider<'a> {
+	client: &'a MiningBlockChainClient,
+	service_transaction_action: &'a ServiceTransactionAction,
+}
+
+impl<'a> TransactionDetailsProvider<'a> {
+	pub fn new(client: &'a MiningBlockChainClient, service_transaction_action: &'a ServiceTransactionAction) -> Self {
+		TransactionDetailsProvider {
+			client: client,
+			service_transaction_action: service_transaction_action,
+		}
+	}
+}
+
+impl<'a> TransactionQueueDetailsProvider for TransactionDetailsProvider<'a> {
+	fn fetch_account(&self, address: &Address) -> AccountDetails {
+		AccountDetails {
+			nonce: self.client.latest_nonce(address),
+			balance: self.client.latest_balance(address),
+		}
+	}
+
+	fn estimate_gas_required(&self, tx: &SignedTransaction) -> U256 {
+		tx.gas_required(&self.client.latest_schedule()).into()
+	}
+
+	fn is_service_transaction_acceptable(&self, tx: &SignedTransaction) -> Result<bool, String> {
+		self.service_transaction_action.check(self.client, tx)
+	}
+}
+
 #[cfg(test)]
 mod tests {
 
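The `ServiceTransactionAction` introduced above is the CLI-controlled gate: `--refuse-service-transactions` selects `Refuse`, otherwise a `ServiceTransactionChecker` is consulted. A small self-contained model of that decision, with stand-in types rather than the actual Parity code (the real `Check` variant wraps a `ServiceTransactionChecker` and queries the chain):

```rust
// Toy model of ServiceTransactionAction: either refuse outright, or defer to a checker
// whose answer stands in for the on-chain `certified(sender)` call.
enum ServiceTransactionAction {
    Refuse,
    Check, // stands in for Check(ServiceTransactionChecker)
}

impl ServiceTransactionAction {
    fn from_refuse_flag(refuse_service_transactions: bool) -> Self {
        if refuse_service_transactions {
            ServiceTransactionAction::Refuse
        } else {
            ServiceTransactionAction::Check
        }
    }

    fn check(&self, sender_certified: Result<bool, String>) -> Result<bool, String> {
        match *self {
            ServiceTransactionAction::Refuse => {
                Err("configured to refuse service transactions".to_owned())
            }
            ServiceTransactionAction::Check => sender_certified,
        }
    }
}

fn main() {
    let refuse = ServiceTransactionAction::from_refuse_flag(true);
    let check = ServiceTransactionAction::from_refuse_flag(false);

    assert!(refuse.check(Ok(true)).is_err());       // node refuses regardless of the contract
    assert_eq!(check.check(Ok(true)), Ok(true));    // whitelisted sender accepted
    assert_eq!(check.check(Ok(false)), Ok(false));  // non-whitelisted sender left to the caller to reject
}
```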
@@ -1222,6 +1283,7 @@ mod tests {
 			work_queue_size: 5,
 			enable_resubmission: true,
 			tx_queue_banning: Banning::Disabled,
+			refuse_service_transactions: false,
 		},
 		GasPricer::new_fixed(0u64.into()),
 		&Spec::new_test(),
@@ -46,12 +46,14 @@ mod external;
 mod local_transactions;
 mod miner;
 mod price_info;
+mod service_transaction_checker;
 mod transaction_queue;
 mod work_notify;
 
 pub use self::external::{ExternalMiner, ExternalMinerService};
 pub use self::miner::{Miner, MinerOptions, Banning, PendingSet, GasPricer, GasPriceCalibratorOptions, GasLimit};
-pub use self::transaction_queue::{TransactionQueue, PrioritizationStrategy, AccountDetails, TransactionOrigin};
+pub use self::transaction_queue::{TransactionQueue, TransactionDetailsProvider as TransactionQueueDetailsProvider,
+	PrioritizationStrategy, AccountDetails, TransactionOrigin};
 pub use self::local_transactions::{Status as LocalTransactionStatus};
 pub use client::TransactionImportResult;
 

ethcore/src/miner/service_transaction_checker.rs (new file, 212 lines)
@@ -0,0 +1,212 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use client::MiningBlockChainClient;
+use transaction::SignedTransaction;
+use util::{U256, Uint, Mutex};
+
+const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker";
+
+/// Service transactions checker.
+#[derive(Default)]
+pub struct ServiceTransactionChecker {
+	contract: Mutex<Option<provider::Contract>>,
+}
+
+impl ServiceTransactionChecker {
+	/// Try to create instance, reading contract address from given chain client.
+	pub fn update_from_chain_client(&self, client: &MiningBlockChainClient) {
+		let mut contract = self.contract.lock();
+		if contract.is_none() {
+			*contract = client.registry_address(SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned())
+				.and_then(|contract_addr| {
+					trace!(target: "txqueue", "Configuring for service transaction checker contract from {}", contract_addr);
+
+					Some(provider::Contract::new(contract_addr))
+				})
+		}
+	}
+
+	/// Checks if service transaction can be appended to the transaction queue.
+	pub fn check(&self, client: &MiningBlockChainClient, tx: &SignedTransaction) -> Result<bool, String> {
+		debug_assert_eq!(tx.gas_price, U256::zero());
+
+		if let Some(ref contract) = *self.contract.lock() {
+			let do_call = |a, d| client.call_contract(a, d);
+			contract.certified(&do_call, &tx.sender())
+		} else {
+			Err("contract is not configured".to_owned())
+		}
+	}
+}
+
+mod provider {
+	// Autogenerated from JSON contract definition using Rust contract convertor.
+	// Command line: --jsonabi=SimpleCertifier.abi --explicit-do-call
+	#![allow(unused_imports)]
+	use std::string::String;
+	use std::result::Result;
+	use std::fmt;
+	use {util, ethabi};
+	use util::{FixedHash, Uint};
+
+	pub struct Contract {
+		contract: ethabi::Contract,
+		address: util::Address,
+
+	}
+	impl Contract {
+		pub fn new(address: util::Address) -> Self
+		{
+			Contract {
+				contract: ethabi::Contract::new(ethabi::Interface::load(b"[{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certify\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"revoke\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"delegate\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getUint\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setDelegate\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certified\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"get\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"type\":\"function\"}]").expect("JSON is autogenerated; qed")),
+				address: address,
+
+			}
+		}
+		fn as_string<T: fmt::Debug>(e: T) -> String { format!("{:?}", e) }
+
+		/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn set_owner<F>(&self, do_call: &F, _new: &util::Address) -> Result<(), String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("setOwner".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_new.clone().0)]
+			).map_err(Self::as_string)?;
+			call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+
+			Ok(())
+		}
+
+		/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn certify<F>(&self, do_call: &F, _who: &util::Address) -> Result<(), String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("certify".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_who.clone().0)]
+			).map_err(Self::as_string)?;
+			call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+
+			Ok(())
+		}
+
+		/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn get_address<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::Address, String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("getAddress".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
+			).map_err(Self::as_string)?;
+			let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+			let mut result = output.into_iter().rev().collect::<Vec<_>>();
+			Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
+		}
+
+		/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn revoke<F>(&self, do_call: &F, _who: &util::Address) -> Result<(), String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("revoke".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_who.clone().0)]
+			).map_err(Self::as_string)?;
+			call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+
+			Ok(())
+		}
+
+		/// Auto-generated from: `{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn owner<F>(&self, do_call: &F) -> Result<util::Address, String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("owner".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![]
+			).map_err(Self::as_string)?;
+			let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+			let mut result = output.into_iter().rev().collect::<Vec<_>>();
+			Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
+		}
+
+		/// Auto-generated from: `{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn delegate<F>(&self, do_call: &F) -> Result<util::Address, String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("delegate".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![]
+			).map_err(Self::as_string)?;
+			let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+			let mut result = output.into_iter().rev().collect::<Vec<_>>();
+			Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
+		}
+
+		/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn get_uint<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::U256, String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("getUint".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
+			).map_err(Self::as_string)?;
+			let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+			let mut result = output.into_iter().rev().collect::<Vec<_>>();
+			Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) }))
+		}
+
+		/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn set_delegate<F>(&self, do_call: &F, _new: &util::Address) -> Result<(), String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("setDelegate".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_new.clone().0)]
+			).map_err(Self::as_string)?;
+			call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+
+			Ok(())
+		}
+
+		/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn certified<F>(&self, do_call: &F, _who: &util::Address) -> Result<bool, String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("certified".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_who.clone().0)]
+			).map_err(Self::as_string)?;
+			let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+			let mut result = output.into_iter().rev().collect::<Vec<_>>();
+			Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
+		}
+
+		/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}`
+		#[allow(dead_code)]
+		pub fn get<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::H256, String>
+			where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
+			let call = self.contract.function("get".into()).map_err(Self::as_string)?;
+			let data = call.encode_call(
+				vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
+			).map_err(Self::as_string)?;
+			let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+			let mut result = output.into_iter().rev().collect::<Vec<_>>();
+			Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) }))
+		}
+	}
+}
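The checker above resolves its contract lazily: the first import after startup looks up `service_transaction_checker` in the registry and caches the resulting `provider::Contract` behind a `Mutex<Option<_>>`; until that succeeds, every service transaction is rejected with "contract is not configured". A self-contained sketch of that caching pattern using only standard-library types (the `Checker` here is a stand-in, not the real `ServiceTransactionChecker`):

```rust
use std::sync::Mutex;

// Stand-in for provider::Contract: just the resolved registry entry.
struct Checker {
    contract: Mutex<Option<String>>,
}

impl Checker {
    fn update_from_registry<F>(&self, registry_lookup: F)
    where
        F: Fn(&str) -> Option<String>,
    {
        let mut contract = self.contract.lock().unwrap();
        // Only the first successful lookup is kept; later calls are no-ops.
        if contract.is_none() {
            *contract = registry_lookup("service_transaction_checker");
        }
    }

    fn check(&self, sender_certified: bool) -> Result<bool, String> {
        match *self.contract.lock().unwrap() {
            // With a contract configured, the answer comes from `certified(sender)`.
            Some(_) => Ok(sender_certified),
            None => Err("contract is not configured".to_owned()),
        }
    }
}

fn main() {
    let checker = Checker { contract: Mutex::new(None) };

    assert!(checker.check(true).is_err()); // nothing registered yet
    checker.update_from_registry(|_name| Some("0xregistered-contract".to_owned()));
    assert_eq!(checker.check(true), Ok(true));
    assert_eq!(checker.check(false), Ok(false));
}
```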
(File diff suppressed because it is too large.)
@@ -90,6 +90,7 @@ tx_time_limit = 100 #ms
 extra_data = "Parity"
 remove_solved = false
 notify_work = ["http://localhost:3001"]
+refuse_service_transactions = false
 
 [footprint]
 tracing = "auto"

@@ -230,6 +230,8 @@ usage! {
 			or |c: &Config| otry!(c.mining).remove_solved.clone(),
 		flag_notify_work: Option<String> = None,
 			or |c: &Config| otry!(c.mining).notify_work.clone().map(|vec| Some(vec.join(","))),
+		flag_refuse_service_transactions: bool = false,
+			or |c: &Config| otry!(c.mining).refuse_service_transactions.clone(),
 
 		// -- Footprint Options
 		flag_tracing: String = "auto",

@@ -416,6 +418,7 @@ struct Mining {
 	tx_queue_ban_time: Option<u16>,
 	remove_solved: Option<bool>,
 	notify_work: Option<Vec<String>>,
+	refuse_service_transactions: Option<bool>,
 }
 
 #[derive(Default, Debug, PartialEq, RustcDecodable)]

@@ -633,6 +636,7 @@ mod tests {
 			flag_tx_queue_ban_time: 180u16,
 			flag_remove_solved: false,
 			flag_notify_work: Some("http://localhost:3001".into()),
+			flag_refuse_service_transactions: false,
 
 			// -- Footprint Options
 			flag_tracing: "auto".into(),

@@ -811,6 +815,7 @@ mod tests {
 			extra_data: None,
 			remove_solved: None,
 			notify_work: None,
+			refuse_service_transactions: None,
 		}),
 		footprint: Some(Footprint {
 			tracing: Some("on".into()),

@@ -22,361 +22,363 @@ Usage:
   parity db kill [options]
 
 Operating Options:
   --mode MODE                    Set the operating mode. MODE can be one of:
                                  last - Uses the last-used mode, active if none.
                                  active - Parity continuously syncs the chain.
                                  passive - Parity syncs initially, then sleeps and
                                  wakes regularly to resync.
                                  dark - Parity syncs only when the RPC is active.
                                  offline - Parity doesn't sync. (default: {flag_mode}).
   --mode-timeout SECS            Specify the number of seconds before inactivity
                                  timeout occurs when mode is dark or passive
                                  (default: {flag_mode_timeout}).
   --mode-alarm SECS              Specify the number of seconds before auto sleep
                                  reawake timeout occurs when mode is passive
                                  (default: {flag_mode_alarm}).
   --auto-update SET              Set a releases set to automatically update and
                                  install.
                                  all - All updates in the our release track.
                                  critical - Only consensus/security updates.
                                  none - No updates will be auto-installed.
                                  (default: {flag_auto_update}).
   --release-track TRACK          Set which release track we should use for updates.
                                  stable - Stable releases.
                                  beta - Beta releases.
                                  nightly - Nightly releases (unstable).
                                  testing - Testing releases (do not use).
                                  current - Whatever track this executable was
                                  released on (default: {flag_release_track}).
   --no-download                  Normally new releases will be downloaded ready for
                                  updating. This disables it. Not recommended.
                                  (default: {flag_no_download}).
   --no-consensus                 Force the binary to run even if there are known
                                  issues regarding consensus. Not recommended.
                                  (default: {flag_no_consensus}).
   --force-direct                 Run the originally installed version of Parity,
                                  ignoring any updates that have since been installed.
   --chain CHAIN                  Specify the blockchain type. CHAIN may be either a
                                  JSON chain specification file or olympic, frontier,
                                  homestead, mainnet, morden, ropsten, classic, expanse,
                                  testnet or dev (default: {flag_chain}).
   -d --base-path PATH            Specify the base data storage path.
                                  (default: {flag_base_path}).
   --db-path PATH                 Specify the database directory path
                                  (default: {flag_db_path}).
   --keys-path PATH               Specify the path for JSON key files to be found
                                  (default: {flag_keys_path}).
   --identity NAME                Specify your node's name. (default: {flag_identity})
 
 Account Options:
   --unlock ACCOUNTS              Unlock ACCOUNTS for the duration of the execution.
                                  ACCOUNTS is a comma-delimited list of addresses.
                                  Implies --no-ui. (default: {flag_unlock:?})
   --password FILE                Provide a file containing a password for unlocking
                                  an account. Leading and trailing whitespace is trimmed.
                                  (default: {flag_password:?})
   --keys-iterations NUM          Specify the number of iterations to use when
                                  deriving key from the password (bigger is more
                                  secure) (default: {flag_keys_iterations}).
 
 UI Options:
   --force-ui                     Enable Trusted UI WebSocket endpoint,
                                  even when --unlock is in use. (default: ${flag_force_ui})
   --no-ui                        Disable Trusted UI WebSocket endpoint.
                                  (default: ${flag_no_ui})
   --ui-port PORT                 Specify the port of Trusted UI server
                                  (default: {flag_ui_port}).
   --ui-interface IP              Specify the hostname portion of the Trusted UI
                                  server, IP should be an interface's IP address,
                                  or local (default: {flag_ui_interface}).
   --ui-path PATH                 Specify directory where Trusted UIs tokens should
                                  be stored. (default: {flag_ui_path})
   --ui-no-validation             Disable Origin and Host headers validation for
                                  Trusted UI. WARNING: INSECURE. Used only for
                                  development. (default: {flag_ui_no_validation})
 
 Networking Options:
   --warp                         Enable syncing from the snapshot over the network. (default: {flag_warp})
   --port PORT                    Override the port on which the node should listen
                                  (default: {flag_port}).
   --min-peers NUM                Try to maintain at least NUM peers (default: {flag_min_peers}).
   --max-peers NUM                Allow up to NUM peers (default: {flag_max_peers}).
   --snapshot-peers NUM           Allow additional NUM peers for a snapshot sync
                                  (default: {flag_snapshot_peers}).
   --nat METHOD                   Specify method to use for determining public
                                  address. Must be one of: any, none, upnp,
                                  extip:<IP> (default: {flag_nat}).
   --network-id INDEX             Override the network identifier from the chain we
                                  are on. (default: {flag_network_id:?})
   --bootnodes NODES              Override the bootnodes from our chain. NODES should
                                  be comma-delimited enodes. (default: {flag_bootnodes:?})
   --no-discovery                 Disable new peer discovery. (default: {flag_no_discovery})
   --node-key KEY                 Specify node secret key, either as 64-character hex
                                  string or input to SHA3 operation. (default: {flag_node_key:?})
   --reserved-peers FILE          Provide a file containing enodes, one per line.
                                  These nodes will always have a reserved slot on top
                                  of the normal maximum peers. (default: {flag_reserved_peers:?})
   --reserved-only                Connect only to reserved nodes. (default: {flag_reserved_only})
   --allow-ips FILTER             Filter outbound connections. Must be one of:
                                  private - connect to private network IP addresses only;
                                  public - connect to public network IP addresses only;
                                  all - connect to any IP address.
                                  (default: {flag_allow_ips})
   --max-pending-peers NUM        Allow up to NUM pending connections. (default: {flag_max_pending_peers})
   --no-ancient-blocks            Disable downloading old blocks after snapshot restoration
                                  or warp sync. (default: {flag_no_ancient_blocks})
 
 API and Console Options:
   --no-jsonrpc                   Disable the JSON-RPC API server. (default: {flag_no_jsonrpc})
   --jsonrpc-port PORT            Specify the port portion of the JSONRPC API server
                                  (default: {flag_jsonrpc_port}).
   --jsonrpc-interface IP         Specify the hostname portion of the JSONRPC API
                                  server, IP should be an interface's IP address, or
                                  all (all interfaces) or local (default: {flag_jsonrpc_interface}).
   --jsonrpc-cors URL             Specify CORS header for JSON-RPC API responses.
                                  (default: {flag_jsonrpc_cors:?})
   --jsonrpc-apis APIS            Specify the APIs available through the JSONRPC
                                  interface. APIS is a comma-delimited list of API
                                  name. Possible name are web3, eth, net, personal,
                                  parity, parity_set, traces, rpc, parity_accounts.
                                  (default: {flag_jsonrpc_apis}).
   --jsonrpc-hosts HOSTS          List of allowed Host header values. This option will
                                  validate the Host header sent by the browser, it
                                  is additional security against some attack
                                  vectors. Special options: "all", "none",
                                  (default: {flag_jsonrpc_hosts}).
 
   --no-ipc                       Disable JSON-RPC over IPC service. (default: {flag_no_ipc})
   --ipc-path PATH                Specify custom path for JSON-RPC over IPC service
                                  (default: {flag_ipc_path}).
   --ipc-apis APIS                Specify custom API set available via JSON-RPC over
                                  IPC (default: {flag_ipc_apis}).
 
   --no-dapps                     Disable the Dapps server (e.g. status page). (default: {flag_no_dapps})
   --dapps-port PORT              Specify the port portion of the Dapps server
                                  (default: {flag_dapps_port}).
   --dapps-interface IP           Specify the hostname portion of the Dapps
                                  server, IP should be an interface's IP address,
                                  or local (default: {flag_dapps_interface}).
   --dapps-hosts HOSTS            List of allowed Host header values. This option will
                                  validate the Host header sent by the browser, it
                                  is additional security against some attack
                                  vectors. Special options: "all", "none",
                                  (default: {flag_dapps_hosts}).
   --dapps-user USERNAME          Specify username for Dapps server. It will be
                                  used in HTTP Basic Authentication Scheme.
                                  If --dapps-pass is not specified you will be
                                  asked for password on startup. (default: {flag_dapps_user:?})
   --dapps-pass PASSWORD          Specify password for Dapps server. Use only in
                                  conjunction with --dapps-user. (default: {flag_dapps_pass:?})
   --dapps-path PATH              Specify directory where dapps should be installed.
                                  (default: {flag_dapps_path})
 
 Sealing/Mining Options:
   --author ADDRESS               Specify the block author (aka "coinbase") address
                                  for sending block rewards from sealed blocks.
                                  NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.
                                  (default: {flag_author:?})
   --engine-signer ADDRESS        Specify the address which should be used to
                                  sign consensus messages and issue blocks.
                                  Relevant only to non-PoW chains.
                                  (default: {flag_engine_signer:?})
   --force-sealing                Force the node to author new blocks as if it were
                                  always sealing/mining.
                                  (default: {flag_force_sealing})
   --reseal-on-txs SET            Specify which transactions should force the node
                                  to reseal a block. SET is one of:
                                  none - never reseal on new transactions;
                                  own - reseal only on a new local transaction;
                                  ext - reseal only on a new external transaction;
                                  all - reseal on all new transactions
                                  (default: {flag_reseal_on_txs}).
   --reseal-min-period MS         Specify the minimum time between reseals from
                                  incoming transactions. MS is time measured in
                                  milliseconds (default: {flag_reseal_min_period}).
   --work-queue-size ITEMS        Specify the number of historical work packages
                                  which are kept cached lest a solution is found for
                                  them later. High values take more memory but result
                                  in fewer unusable solutions (default: {flag_work_queue_size}).
   --tx-gas-limit GAS             Apply a limit of GAS as the maximum amount of gas
|
||||||
a single transaction may have for it to be mined.
|
a single transaction may have for it to be mined.
|
||||||
(default: {flag_tx_gas_limit:?})
|
(default: {flag_tx_gas_limit:?})
|
||||||
--tx-time-limit MS Maximal time for processing single transaction.
|
--tx-time-limit MS Maximal time for processing single transaction.
|
||||||
If enabled senders/recipients/code of transactions
|
If enabled senders/recipients/code of transactions
|
||||||
offending the limit will be banned from being included
|
offending the limit will be banned from being included
|
||||||
in transaction queue for 180 seconds.
|
in transaction queue for 180 seconds.
|
||||||
(default: {flag_tx_time_limit:?})
|
(default: {flag_tx_time_limit:?})
|
||||||
--relay-set SET Set of transactions to relay. SET may be:
|
--relay-set SET Set of transactions to relay. SET may be:
|
||||||
cheap - Relay any transaction in the queue (this
|
cheap - Relay any transaction in the queue (this
|
||||||
may include invalid transactions);
|
may include invalid transactions);
|
||||||
strict - Relay only executed transactions (this
|
strict - Relay only executed transactions (this
|
||||||
guarantees we don't relay invalid transactions, but
|
guarantees we don't relay invalid transactions, but
|
||||||
means we relay nothing if not mining);
|
means we relay nothing if not mining);
|
||||||
lenient - Same as strict when mining, and cheap
|
lenient - Same as strict when mining, and cheap
|
||||||
when not (default: {flag_relay_set}).
|
when not (default: {flag_relay_set}).
|
||||||
--usd-per-tx USD Amount of USD to be paid for a basic transaction
|
--usd-per-tx USD Amount of USD to be paid for a basic transaction
|
||||||
(default: {flag_usd_per_tx}). The minimum gas price is set
|
(default: {flag_usd_per_tx}). The minimum gas price is set
|
||||||
accordingly.
|
accordingly.
|
||||||
--usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an
|
--usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an
|
||||||
amount in USD, a web service or 'auto' to use each
|
amount in USD, a web service or 'auto' to use each
|
||||||
web service in turn and fallback on the last known
|
web service in turn and fallback on the last known
|
||||||
good value (default: {flag_usd_per_eth}).
|
good value (default: {flag_usd_per_eth}).
|
||||||
--price-update-period T T will be allowed to pass between each gas price
|
--price-update-period T T will be allowed to pass between each gas price
|
||||||
update. T may be daily, hourly, a number of seconds,
|
update. T may be daily, hourly, a number of seconds,
|
||||||
or a time string of the form "2 days", "30 minutes"
|
or a time string of the form "2 days", "30 minutes"
|
||||||
etc. (default: {flag_price_update_period}).
|
etc. (default: {flag_price_update_period}).
|
||||||
--gas-floor-target GAS Amount of gas per block to target when sealing a new
|
--gas-floor-target GAS Amount of gas per block to target when sealing a new
|
||||||
block (default: {flag_gas_floor_target}).
|
block (default: {flag_gas_floor_target}).
|
||||||
--gas-cap GAS A cap on how large we will raise the gas limit per
|
--gas-cap GAS A cap on how large we will raise the gas limit per
|
||||||
block due to transaction volume (default: {flag_gas_cap}).
|
block due to transaction volume (default: {flag_gas_cap}).
|
||||||
--extra-data STRING Specify a custom extra-data for authored blocks, no
|
--extra-data STRING Specify a custom extra-data for authored blocks, no
|
||||||
more than 32 characters. (default: {flag_extra_data:?})
|
more than 32 characters. (default: {flag_extra_data:?})
|
||||||
--tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting
|
--tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting
|
||||||
to be included in next block) (default: {flag_tx_queue_size}).
|
to be included in next block) (default: {flag_tx_queue_size}).
|
||||||
--tx-queue-gas LIMIT Maximum amount of total gas for external transactions in
|
--tx-queue-gas LIMIT Maximum amount of total gas for external transactions in
|
||||||
the queue. LIMIT can be either an amount of gas or
|
the queue. LIMIT can be either an amount of gas or
|
||||||
'auto' or 'off'. 'auto' sets the limit to be 20x
|
'auto' or 'off'. 'auto' sets the limit to be 20x
|
||||||
the current block gas limit. (default: {flag_tx_queue_gas}).
|
the current block gas limit. (default: {flag_tx_queue_gas}).
|
||||||
--tx-queue-strategy S Prioritization strategy used to order transactions
|
--tx-queue-strategy S Prioritization strategy used to order transactions
|
||||||
in the queue. S may be:
|
in the queue. S may be:
|
||||||
gas - Prioritize txs with low gas limit;
|
gas - Prioritize txs with low gas limit;
|
||||||
gas_price - Prioritize txs with high gas price;
|
gas_price - Prioritize txs with high gas price;
|
||||||
gas_factor - Prioritize txs using gas price
|
gas_factor - Prioritize txs using gas price
|
||||||
and gas limit ratio (default: {flag_tx_queue_strategy}).
|
and gas limit ratio (default: {flag_tx_queue_strategy}).
|
||||||
--tx-queue-ban-count C Number of times maximal time for execution (--tx-time-limit)
|
--tx-queue-ban-count C Number of times maximal time for execution (--tx-time-limit)
|
||||||
can be exceeded before banning sender/recipient/code.
|
can be exceeded before banning sender/recipient/code.
|
||||||
(default: {flag_tx_queue_ban_count})
|
(default: {flag_tx_queue_ban_count})
|
||||||
--tx-queue-ban-time SEC Banning time (in seconds) for offenders of specified
|
--tx-queue-ban-time SEC Banning time (in seconds) for offenders of specified
|
||||||
execution time limit. Also number of offending actions
|
execution time limit. Also number of offending actions
|
||||||
have to reach the threshold within that time.
|
have to reach the threshold within that time.
|
||||||
(default: {flag_tx_queue_ban_time} seconds)
|
(default: {flag_tx_queue_ban_time} seconds)
|
||||||
--remove-solved Move solved blocks from the work package queue
|
--remove-solved Move solved blocks from the work package queue
|
||||||
instead of cloning them. This gives a slightly
|
instead of cloning them. This gives a slightly
|
||||||
faster import speed, but means that extra solutions
|
faster import speed, but means that extra solutions
|
||||||
submitted for the same work package will go unused.
|
submitted for the same work package will go unused.
|
||||||
(default: {flag_remove_solved})
|
(default: {flag_remove_solved})
|
||||||
--notify-work URLS URLs to which work package notifications are pushed.
|
--notify-work URLS URLs to which work package notifications are pushed.
|
||||||
URLS should be a comma-delimited list of HTTP URLs.
|
URLS should be a comma-delimited list of HTTP URLs.
|
||||||
(default: {flag_notify_work:?})
|
(default: {flag_notify_work:?})
|
||||||
|
--refuse-service-transactions Always refuse service transactions.
|
||||||
|
(default: {flag_refuse_service_transactions}).
|
||||||
|
|
||||||
Footprint Options:
|
Footprint Options:
|
||||||
--tracing BOOL Indicates if full transaction tracing should be
|
--tracing BOOL Indicates if full transaction tracing should be
|
||||||
enabled. Works only if client had been fully synced
|
enabled. Works only if client had been fully synced
|
||||||
with tracing enabled. BOOL may be one of auto, on,
|
with tracing enabled. BOOL may be one of auto, on,
|
||||||
off. auto uses last used value of this option (off
|
off. auto uses last used value of this option (off
|
||||||
if it does not exist) (default: {flag_tracing}).
|
if it does not exist) (default: {flag_tracing}).
|
||||||
--pruning METHOD Configure pruning of the state/storage trie. METHOD
|
--pruning METHOD Configure pruning of the state/storage trie. METHOD
|
||||||
may be one of auto, archive, fast:
|
may be one of auto, archive, fast:
|
||||||
archive - keep all state trie data. No pruning.
|
archive - keep all state trie data. No pruning.
|
||||||
fast - maintain journal overlay. Fast but 50MB used.
|
fast - maintain journal overlay. Fast but 50MB used.
|
||||||
auto - use the method most recently synced or
|
auto - use the method most recently synced or
|
||||||
default to fast if none synced (default: {flag_pruning}).
|
default to fast if none synced (default: {flag_pruning}).
|
||||||
--pruning-history NUM Set a minimum number of recent states to keep when pruning
|
--pruning-history NUM Set a minimum number of recent states to keep when pruning
|
||||||
is active. (default: {flag_pruning_history}).
|
is active. (default: {flag_pruning_history}).
|
||||||
--pruning-memory MB The ideal amount of memory in megabytes to use to store
|
--pruning-memory MB The ideal amount of memory in megabytes to use to store
|
||||||
recent states. As many states as possible will be kept
|
recent states. As many states as possible will be kept
|
||||||
within this limit, and at least --pruning-history states
|
within this limit, and at least --pruning-history states
|
||||||
will always be kept. (default: {flag_pruning_memory})
|
will always be kept. (default: {flag_pruning_memory})
|
||||||
--cache-size-db MB Override database cache size (default: {flag_cache_size_db}).
|
--cache-size-db MB Override database cache size (default: {flag_cache_size_db}).
|
||||||
--cache-size-blocks MB Specify the prefered size of the blockchain cache in
|
--cache-size-blocks MB Specify the prefered size of the blockchain cache in
|
||||||
megabytes (default: {flag_cache_size_blocks}).
|
megabytes (default: {flag_cache_size_blocks}).
|
||||||
--cache-size-queue MB Specify the maximum size of memory to use for block
|
--cache-size-queue MB Specify the maximum size of memory to use for block
|
||||||
queue (default: {flag_cache_size_queue}).
|
queue (default: {flag_cache_size_queue}).
|
||||||
--cache-size-state MB Specify the maximum size of memory to use for
|
--cache-size-state MB Specify the maximum size of memory to use for
|
||||||
the state cache (default: {flag_cache_size_state}).
|
the state cache (default: {flag_cache_size_state}).
|
||||||
--cache-size MB Set total amount of discretionary memory to use for
|
--cache-size MB Set total amount of discretionary memory to use for
|
||||||
the entire system, overrides other cache and queue
|
the entire system, overrides other cache and queue
|
||||||
options. (default: {flag_cache_size:?})
|
options. (default: {flag_cache_size:?})
|
||||||
--fast-and-loose Disables DB WAL, which gives a significant speed up
|
--fast-and-loose Disables DB WAL, which gives a significant speed up
|
||||||
but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
|
but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
|
||||||
--db-compaction TYPE Database compaction type. TYPE may be one of:
|
--db-compaction TYPE Database compaction type. TYPE may be one of:
|
||||||
ssd - suitable for SSDs and fast HDDs;
|
ssd - suitable for SSDs and fast HDDs;
|
||||||
hdd - suitable for slow HDDs;
|
hdd - suitable for slow HDDs;
|
||||||
auto - determine automatically (default: {flag_db_compaction}).
|
auto - determine automatically (default: {flag_db_compaction}).
|
||||||
--fat-db BOOL Build appropriate information to allow enumeration
|
--fat-db BOOL Build appropriate information to allow enumeration
|
||||||
of all accounts and storage keys. Doubles the size
|
of all accounts and storage keys. Doubles the size
|
||||||
of the state database. BOOL may be one of on, off
|
of the state database. BOOL may be one of on, off
|
||||||
or auto. (default: {flag_fat_db})
|
or auto. (default: {flag_fat_db})
|
||||||
--scale-verifiers Automatically scale amount of verifier threads based on
|
--scale-verifiers Automatically scale amount of verifier threads based on
|
||||||
workload. Not guaranteed to be faster.
|
workload. Not guaranteed to be faster.
|
||||||
(default: {flag_scale_verifiers})
|
(default: {flag_scale_verifiers})
|
||||||
--num-verifiers INT Amount of verifier threads to use or to begin with, if verifier
|
--num-verifiers INT Amount of verifier threads to use or to begin with, if verifier
|
||||||
auto-scaling is enabled. (default: {flag_num_verifiers:?})
|
auto-scaling is enabled. (default: {flag_num_verifiers:?})
|
||||||
|
|
||||||
Import/Export Options:
|
Import/Export Options:
|
||||||
--from BLOCK Export from block BLOCK, which may be an index or
|
--from BLOCK Export from block BLOCK, which may be an index or
|
||||||
hash (default: {flag_from}).
|
hash (default: {flag_from}).
|
||||||
--to BLOCK Export to (including) block BLOCK, which may be an
|
--to BLOCK Export to (including) block BLOCK, which may be an
|
||||||
index, hash or 'latest' (default: {flag_to}).
|
index, hash or 'latest' (default: {flag_to}).
|
||||||
--format FORMAT For import/export in given format. FORMAT must be
|
--format FORMAT For import/export in given format. FORMAT must be
|
||||||
one of 'hex' and 'binary'.
|
one of 'hex' and 'binary'.
|
||||||
(default: {flag_format:?} = Import: auto, Export: binary)
|
(default: {flag_format:?} = Import: auto, Export: binary)
|
||||||
--no-seal-check Skip block seal check. (default: {flag_no_seal_check})
|
--no-seal-check Skip block seal check. (default: {flag_no_seal_check})
|
||||||
--at BLOCK Export state at the given block, which may be an
|
--at BLOCK Export state at the given block, which may be an
|
||||||
index, hash, or 'latest'. (default: {flag_at})
|
index, hash, or 'latest'. (default: {flag_at})
|
||||||
--no-storage Don't export account storage. (default: {flag_no_storage})
|
--no-storage Don't export account storage. (default: {flag_no_storage})
|
||||||
--no-code Don't export account code. (default: {flag_no_code})
|
--no-code Don't export account code. (default: {flag_no_code})
|
||||||
--min-balance WEI Don't export accounts with balance less than specified.
|
--min-balance WEI Don't export accounts with balance less than specified.
|
||||||
(default: {flag_min_balance:?})
|
(default: {flag_min_balance:?})
|
||||||
--max-balance WEI Don't export accounts with balance greater than specified.
|
--max-balance WEI Don't export accounts with balance greater than specified.
|
||||||
(default: {flag_max_balance:?})
|
(default: {flag_max_balance:?})
|
||||||
|
|
||||||
Snapshot Options:
|
Snapshot Options:
|
||||||
--at BLOCK Take a snapshot at the given block, which may be an
|
--at BLOCK Take a snapshot at the given block, which may be an
|
||||||
index, hash, or 'latest'. Note that taking snapshots at
|
index, hash, or 'latest'. Note that taking snapshots at
|
||||||
non-recent blocks will only work with --pruning archive
|
non-recent blocks will only work with --pruning archive
|
||||||
(default: {flag_at})
|
(default: {flag_at})
|
||||||
--no-periodic-snapshot Disable automated snapshots which usually occur once
|
--no-periodic-snapshot Disable automated snapshots which usually occur once
|
||||||
every 10000 blocks. (default: {flag_no_periodic_snapshot})
|
every 10000 blocks. (default: {flag_no_periodic_snapshot})
|
||||||
|
|
||||||
Virtual Machine Options:
|
Virtual Machine Options:
|
||||||
--jitvm Enable the JIT VM. (default: {flag_jitvm})
|
--jitvm Enable the JIT VM. (default: {flag_jitvm})
|
||||||
|
|
||||||
Legacy Options:
|
Legacy Options:
|
||||||
--geth Run in Geth-compatibility mode. Sets the IPC path
|
--geth Run in Geth-compatibility mode. Sets the IPC path
|
||||||
to be the same as Geth's. Overrides the --ipc-path
|
to be the same as Geth's. Overrides the --ipc-path
|
||||||
and --ipcpath options. Alters RPCs to reflect Geth
|
and --ipcpath options. Alters RPCs to reflect Geth
|
||||||
bugs. Includes the personal_ RPC by default.
|
bugs. Includes the personal_ RPC by default.
|
||||||
--testnet Geth-compatible testnet mode. Equivalent to --chain
|
--testnet Geth-compatible testnet mode. Equivalent to --chain
|
||||||
testnet --keys-path $HOME/parity/testnet-keys.
|
testnet --keys-path $HOME/parity/testnet-keys.
|
||||||
Overrides the --keys-path option.
|
Overrides the --keys-path option.
|
||||||
--import-geth-keys Attempt to import keys from Geth client.
|
--import-geth-keys Attempt to import keys from Geth client.
|
||||||
--datadir PATH Equivalent to --base-path PATH.
|
--datadir PATH Equivalent to --base-path PATH.
|
||||||
--networkid INDEX Equivalent to --network-id INDEX.
|
--networkid INDEX Equivalent to --network-id INDEX.
|
||||||
--peers NUM Equivalent to --min-peers NUM.
|
--peers NUM Equivalent to --min-peers NUM.
|
||||||
--nodekey KEY Equivalent to --node-key KEY.
|
--nodekey KEY Equivalent to --node-key KEY.
|
||||||
--nodiscover Equivalent to --no-discovery.
|
--nodiscover Equivalent to --no-discovery.
|
||||||
-j --jsonrpc Does nothing; JSON-RPC is on by default now.
|
-j --jsonrpc Does nothing; JSON-RPC is on by default now.
|
||||||
--jsonrpc-off Equivalent to --no-jsonrpc.
|
--jsonrpc-off Equivalent to --no-jsonrpc.
|
||||||
-w --webapp Does nothing; dapps server is on by default now.
|
-w --webapp Does nothing; dapps server is on by default now.
|
||||||
--dapps-off Equivalent to --no-dapps.
|
--dapps-off Equivalent to --no-dapps.
|
||||||
--rpc Does nothing; JSON-RPC is on by default now.
|
--rpc Does nothing; JSON-RPC is on by default now.
|
||||||
--rpcaddr IP Equivalent to --jsonrpc-interface IP.
|
--rpcaddr IP Equivalent to --jsonrpc-interface IP.
|
||||||
--rpcport PORT Equivalent to --jsonrpc-port PORT.
|
--rpcport PORT Equivalent to --jsonrpc-port PORT.
|
||||||
--rpcapi APIS Equivalent to --jsonrpc-apis APIS.
|
--rpcapi APIS Equivalent to --jsonrpc-apis APIS.
|
||||||
--rpccorsdomain URL Equivalent to --jsonrpc-cors URL.
|
--rpccorsdomain URL Equivalent to --jsonrpc-cors URL.
|
||||||
--ipcdisable Equivalent to --no-ipc.
|
--ipcdisable Equivalent to --no-ipc.
|
||||||
--ipc-off Equivalent to --no-ipc.
|
--ipc-off Equivalent to --no-ipc.
|
||||||
--ipcapi APIS Equivalent to --ipc-apis APIS.
|
--ipcapi APIS Equivalent to --ipc-apis APIS.
|
||||||
--ipcpath PATH Equivalent to --ipc-path PATH.
|
--ipcpath PATH Equivalent to --ipc-path PATH.
|
||||||
--gasprice WEI Minimum amount of Wei per GAS to be paid for a
|
--gasprice WEI Minimum amount of Wei per GAS to be paid for a
|
||||||
transaction to be accepted for mining. Overrides
|
transaction to be accepted for mining. Overrides
|
||||||
--basic-tx-usd.
|
--basic-tx-usd.
|
||||||
--etherbase ADDRESS Equivalent to --author ADDRESS.
|
--etherbase ADDRESS Equivalent to --author ADDRESS.
|
||||||
--extradata STRING Equivalent to --extra-data STRING.
|
--extradata STRING Equivalent to --extra-data STRING.
|
||||||
--cache MB Equivalent to --cache-size MB.
|
--cache MB Equivalent to --cache-size MB.
|
||||||
|
|
||||||
Internal Options:
|
Internal Options:
|
||||||
--can-restart Executable will auto-restart if exiting with 69.
|
--can-restart Executable will auto-restart if exiting with 69.
|
||||||
|
|
||||||
Miscellaneous Options:
|
Miscellaneous Options:
|
||||||
-c --config CONFIG Specify a filename containing a configuration file.
|
-c --config CONFIG Specify a filename containing a configuration file.
|
||||||
(default: {flag_config})
|
(default: {flag_config})
|
||||||
-l --logging LOGGING Specify the logging level. Must conform to the same
|
-l --logging LOGGING Specify the logging level. Must conform to the same
|
||||||
format as RUST_LOG. (default: {flag_logging:?})
|
format as RUST_LOG. (default: {flag_logging:?})
|
||||||
--log-file FILENAME Specify a filename into which logging should be
|
--log-file FILENAME Specify a filename into which logging should be
|
||||||
appended. (default: {flag_log_file:?})
|
appended. (default: {flag_log_file:?})
|
||||||
--no-config Don't load a configuration file.
|
--no-config Don't load a configuration file.
|
||||||
--no-color Don't use terminal color codes in output. (default: {flag_no_color})
|
--no-color Don't use terminal color codes in output. (default: {flag_no_color})
|
||||||
-v --version Show information about version.
|
-v --version Show information about version.
|
||||||
-h --help Show this screen.
|
-h --help Show this screen.
|
||||||
|
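A note on the new --refuse-service-transactions flag above: throughout this change a "service transaction" is simply one with a zero gas price, which is exactly the predicate the sync code below uses when partitioning the pending set. A minimal, self-contained Rust sketch of that check (plain u64 stands in for the node's U256 type; the helper name is illustrative, not part of this commit):

// Illustrative sketch only: mirrors the zero-gas-price test used below when
// pending transactions are partitioned into usual and service sets.
fn is_service_transaction(gas_price: u64) -> bool {
	gas_price == 0
}

fn main() {
	// A regular 20 gwei transaction is relayed to any peer.
	assert!(!is_service_transaction(20_000_000_000));
	// A zero-gas-price transaction is the kind --refuse-service-transactions rejects,
	// and it is only relayed to peers known to accept it.
	assert!(is_service_transaction(0));
}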
@ -491,7 +491,8 @@ impl Configuration {
				ban_duration: Duration::from_secs(self.args.flag_tx_queue_ban_time as u64),
			},
			None => Banning::Disabled,
-		}
+		},
+		refuse_service_transactions: self.args.flag_refuse_service_transactions,
	};

	Ok(options)
@ -66,6 +66,7 @@ fn miner_service(spec: &Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
			reseal_min_period: Duration::from_secs(0),
			work_queue_size: 50,
			enable_resubmission: true,
+			refuse_service_transactions: false,
		},
		GasPricer::new_fixed(20_000_000_000u64.into()),
		&spec,
@ -17,6 +17,8 @@
// Rust/Parity ABI struct autogenerator.
// By Gav Wood, 2016.

+var fs = require('fs');

String.prototype.replaceAll = function(f, t) { return this.split(f).join(t); }
String.prototype.toSnake = function(){
	return this.replace(/([A-Z])/g, function($1){return "_"+$1.toLowerCase();});
@ -24,6 +26,7 @@ String.prototype.toSnake = function(){

function makeContractFile(name, json, prefs) {
	return `// Autogenerated from JSON contract definition using Rust contract convertor.
+// Command line: ${process.argv.slice(2).join(' ')}
#![allow(unused_imports)]
use std::string::String;
use std::result::Result;
@ -39,14 +42,15 @@ function convertContract(name, json, prefs) {
	return `${prefs._pub ? "pub " : ""}struct ${name} {
	contract: ethabi::Contract,
	address: util::Address,
-	do_call: Box<Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}+ 'static>,
+	${prefs._explicit_do_call ? "" : `do_call: Box<Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send${prefs._sync ? " + Sync " : ""}+ 'static>,`}
}
impl ${name} {
-	pub fn new<F>(address: util::Address, do_call: F) -> Self where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}+ 'static {
+	pub fn new${prefs._explicit_do_call ? "" : "<F>"}(address: util::Address${prefs._explicit_do_call ? "" : `", do_call: F"`}) -> Self
+		${prefs._explicit_do_call ? "" : `where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}+ 'static`} {
		${name} {
			contract: ethabi::Contract::new(ethabi::Interface::load(b"${JSON.stringify(json.filter(a => a.type == 'function')).replaceAll('"', '\\"')}").expect("JSON is autogenerated; qed")),
			address: address,
-			do_call: Box::new(do_call),
+			${prefs._explicit_do_call ? "" : `do_call: Box::new(do_call),`}
		}
	}
	fn as_string<T: fmt::Debug>(e: T) -> String { format!("{:?}", e) }
@ -205,6 +209,7 @@ function tokenExtract(expr, type, _prefs) {
}

function convertFunction(json, _prefs) {
+	let cprefs = _prefs || {};
	let prefs = (_prefs || {})[json.name] || (_prefs || {})['_'] || {};
	let snakeName = json.name.toSnake();
	let params = json.inputs.map((x, i) => (x.name ? x.name.toSnake() : ("_" + (i + 1))) + ": " + mapType(x.name, x.type, prefs[x.name]));
@ -212,18 +217,35 @@ function convertFunction(json, _prefs) {
	return `
/// Auto-generated from: \`${JSON.stringify(json)}\`
#[allow(dead_code)]
-	pub fn ${snakeName}(&self${params.length > 0 ? ', ' + params.join(", ") : ''}) -> Result<${returns}, String> {
+	pub fn ${snakeName}${cprefs._explicit_do_call ? "<F>" : ""}(&self${cprefs._explicit_do_call ? `, do_call: &F` : ""}${params.length > 0 ? ', ' + params.join(", ") : ''}) -> Result<${returns}, String>
+		${cprefs._explicit_do_call ? `where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send ${prefs._sync ? "+ Sync " : ""}` : ""} {
		let call = self.contract.function("${json.name}".into()).map_err(Self::as_string)?;
		let data = call.encode_call(
			vec![${json.inputs.map((x, i) => convertToken(x.name ? x.name.toSnake() : ("_" + (i + 1)), x.type, prefs[x.name])).join(', ')}]
		).map_err(Self::as_string)?;
-		${json.outputs.length > 0 ? 'let output = ' : ''}call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
+		${json.outputs.length > 0 ? 'let output = ' : ''}call.decode_output((${cprefs._explicit_do_call ? "" : "self."}do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
		${json.outputs.length > 0 ? 'let mut result = output.into_iter().rev().collect::<Vec<_>>();' : ''}
		Ok((${json.outputs.map((o, i) => tokenExtract('result.pop().ok_or("Invalid return arity")?', o.type, prefs[o.name])).join(', ')}))
	}`;
}

+// default preferences:
+let prefs = {"_pub": true, "_": {"_client": {"string": true}, "_platform": {"string": true}}, "_sync": true};
+// default contract json ABI
let jsonabi = [{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"}];

-let out = makeContractFile("Contract", jsonabi, {"_pub": true, "_": {"_client": {"string": true}, "_platform": {"string": true}}, "_sync": true});
+// parse command line options
+for (let i = 1; i < process.argv.length; ++i) {
+	let arg = process.argv[i];
+	if (arg.indexOf("--jsonabi") == 0) {
+		jsonabi = arg.slice(10);
+		if (fs.existsSync(jsonabi)) {
+			jsonabi = JSON.parse(fs.readFileSync(jsonabi).toString());
+		}
+	} else if (arg.indexOf("--explicit-do-call") == 0) {
+		prefs._explicit_do_call = true;
+	}
+}
+
+let out = makeContractFile("Contract", jsonabi, prefs);
console.log(`${out}`);
@ -96,6 +96,7 @@ use ethcore::header::{BlockNumber, Header as BlockHeader};
use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo, BlockImportError, BlockQueueInfo};
use ethcore::error::*;
use ethcore::snapshot::{ManifestData, RestorationStatus};
+use ethcore::transaction::PendingTransaction;
use sync_io::SyncIo;
use time;
use super::SyncConfig;
@ -1949,7 +1950,46 @@ impl ChainSync {
			return 0;
		}

-		let all_transactions_hashes = transactions.iter().map(|tx| tx.transaction.hash()).collect::<HashSet<H256>>();
+		let (transactions, service_transactions): (Vec<_>, Vec<_>) = transactions.into_iter()
+			.partition(|tx| !tx.transaction.gas_price.is_zero());
+
+		// usual transactions could be propagated to all peers
+		let mut affected_peers = HashSet::new();
+		if !transactions.is_empty() {
+			let peers = self.select_peers_for_transactions(|_| true);
+			affected_peers = self.propagate_transactions_to_peers(io, peers, transactions);
+		}
+
+		// most of times service_transactions will be empty
+		// => there's no need to merge packets
+		if !service_transactions.is_empty() {
+			let service_transactions_peers = self.select_peers_for_transactions(|peer_id| accepts_service_transaction(&io.peer_info(*peer_id)));
+			let service_transactions_affected_peers = self.propagate_transactions_to_peers(io, service_transactions_peers, service_transactions);
+			affected_peers.extend(&service_transactions_affected_peers);
+		}
+
+		affected_peers.len()
+	}
+
+	fn select_peers_for_transactions<F>(&self, filter: F) -> Vec<PeerId>
+		where F: Fn(&PeerId) -> bool {
+		// sqrt(x)/x scaled to max u32
+		let fraction = (self.peers.len() as f64).powf(-0.5).mul(u32::max_value() as f64).round() as u32;
+		let small = self.peers.len() < MIN_PEERS_PROPAGATION;
+
+		let mut random = random::new();
+		self.peers.keys()
+			.cloned()
+			.filter(filter)
+			.filter(|_| small || random.next_u32() < fraction)
+			.take(MAX_PEERS_PROPAGATION)
+			.collect()
+	}
+
+	fn propagate_transactions_to_peers(&mut self, io: &mut SyncIo, peers: Vec<PeerId>, transactions: Vec<PendingTransaction>) -> HashSet<PeerId> {
+		let all_transactions_hashes = transactions.iter()
+			.map(|tx| tx.transaction.hash())
+			.collect::<HashSet<H256>>();
		let all_transactions_rlp = {
			let mut packet = RlpStream::new_list(transactions.len());
			for tx in &transactions { packet.append(&tx.transaction); }
@ -1960,26 +2000,24 @@ impl ChainSync {
		self.transactions_stats.retain(&all_transactions_hashes);

		// sqrt(x)/x scaled to max u32
-		let fraction = (self.peers.len() as f64).powf(-0.5).mul(u32::max_value() as f64).round() as u32;
-		let small = self.peers.len() < MIN_PEERS_PROPAGATION;
		let block_number = io.chain().chain_info().best_block_number;

-		let mut random = random::new();
		let lucky_peers = {
-			let stats = &mut self.transactions_stats;
-			self.peers.iter_mut()
-				.filter(|_| small || random.next_u32() < fraction)
-				.take(MAX_PEERS_PROPAGATION)
-				.filter_map(|(peer_id, mut peer_info)| {
+			peers.into_iter()
+				.filter_map(|peer_id| {
+					let stats = &mut self.transactions_stats;
+					let peer_info = self.peers.get_mut(&peer_id)
+						.expect("peer_id is form peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed");

					// Send all transactions
					if peer_info.last_sent_transactions.is_empty() {
						// update stats
						for hash in &all_transactions_hashes {
-							let id = io.peer_session_info(*peer_id).and_then(|info| info.id);
+							let id = io.peer_session_info(peer_id).and_then(|info| info.id);
							stats.propagated(*hash, id, block_number);
						}
						peer_info.last_sent_transactions = all_transactions_hashes.clone();
-						return Some((*peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone()));
+						return Some((peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone()));
					}

					// Get hashes of all transactions to send to this peer
@ -1997,7 +2035,7 @@ impl ChainSync {
					if to_send.contains(&tx.transaction.hash()) {
						packet.append(&tx.transaction);
						// update stats
-						let id = io.peer_session_info(*peer_id).and_then(|info| info.id);
+						let id = io.peer_session_info(peer_id).and_then(|info| info.id);
						stats.propagated(tx.transaction.hash(), id, block_number);
					}
				}
@ -2007,22 +2045,25 @@ impl ChainSync {
					.chain(&to_send)
					.cloned()
					.collect();
-				Some((*peer_id, to_send.len(), packet.out()))
+				Some((peer_id, to_send.len(), packet.out()))
				})
				.collect::<Vec<_>>()
		};

		// Send RLPs
-		let peers = lucky_peers.len();
-		if peers > 0 {
+		let mut peers = HashSet::new();
+		if lucky_peers.len() > 0 {
			let mut max_sent = 0;
+			let lucky_peers_len = lucky_peers.len();
			for (peer_id, sent, rlp) in lucky_peers {
+				peers.insert(peer_id);
				self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp);
				trace!(target: "sync", "{:02} <- Transactions ({} entries)", peer_id, sent);
				max_sent = max(max_sent, sent);
			}
-			debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, peers);
+			debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, lucky_peers_len);
		}

		peers
	}

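A quick worked example of the "sqrt(x)/x scaled to max u32" sampling used by select_peers_for_transactions above: each candidate peer passes the random filter with probability roughly 1/sqrt(n), so out of n peers about sqrt(n) receive the packet on average (subject to the MIN_PEERS_PROPAGATION and MAX_PEERS_PROPAGATION bounds). A standalone sketch of just that arithmetic, using only the standard library; the helper name is illustrative:

// Reproduces the selection threshold from select_peers_for_transactions:
// sqrt(n)/n scaled to the u32 range, compared against a random u32 per peer.
fn selection_fraction(peer_count: usize) -> u32 {
	((peer_count as f64).powf(-0.5) * u32::max_value() as f64).round() as u32
}

fn main() {
	for &n in &[4usize, 16, 100] {
		let p = selection_fraction(n) as f64 / u32::max_value() as f64;
		// Expected number of selected peers is n * 1/sqrt(n) = sqrt(n).
		println!("{} peers -> per-peer probability {:.3}, ~{:.1} peers expected", n, p, n as f64 * p);
	}
}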
@ -2109,12 +2150,30 @@ impl ChainSync {
		}
	}
}

+/// Checks if peer is able to process service transactions
+fn accepts_service_transaction(client_id: &str) -> bool {
+	// Parity versions starting from this will accept service-transactions
+	const SERVICE_TRANSACTIONS_VERSION: (u32, u32) = (1u32, 6u32);
+	// Parity client string prefix
+	const PARITY_CLIENT_ID_PREFIX: &'static str = "Parity/v";
+
+	if !client_id.starts_with(PARITY_CLIENT_ID_PREFIX) {
+		return false;
+	}
+	let ver: Vec<u32> = client_id[PARITY_CLIENT_ID_PREFIX.len()..].split('.')
+		.take(2)
+		.filter_map(|s| s.parse().ok())
+		.collect();
+	ver.len() == 2 && (ver[0] > SERVICE_TRANSACTIONS_VERSION.0 || (ver[0] == SERVICE_TRANSACTIONS_VERSION.0 && ver[1] >= SERVICE_TRANSACTIONS_VERSION.1))
+}
+
#[cfg(test)]
mod tests {
	use std::collections::{HashSet, VecDeque};
+	use network::PeerId;
	use tests::helpers::*;
	use tests::snapshot::TestSnapshotService;
-	use util::{U256, Address, RwLock};
+	use util::{Uint, U256, Address, RwLock};
	use util::sha3::Hashable;
	use util::hash::{H256, FixedHash};
	use util::bytes::Bytes;
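Since the tests below exercise client IDs like "Geth", "Parity/v1.5", "Parity/v1.6" and "Parity/v1.7.3-ABCDEFGH", here is the same version gate from accepts_service_transaction as a self-contained, runnable sketch; the logic is copied from the hunk above, and only the standalone main is added for illustration:

// Sketch of the gate applied to a peer's client ID before sending it
// zero-gas-price (service) transactions; mirrors accepts_service_transaction above.
const SERVICE_TRANSACTIONS_VERSION: (u32, u32) = (1, 6);
const PARITY_CLIENT_ID_PREFIX: &str = "Parity/v";

fn accepts_service_transaction(client_id: &str) -> bool {
	if !client_id.starts_with(PARITY_CLIENT_ID_PREFIX) {
		return false;
	}
	let ver: Vec<u32> = client_id[PARITY_CLIENT_ID_PREFIX.len()..].split('.')
		.take(2)
		.filter_map(|s| s.parse().ok())
		.collect();
	ver.len() == 2 && (ver[0] > SERVICE_TRANSACTIONS_VERSION.0
		|| (ver[0] == SERVICE_TRANSACTIONS_VERSION.0 && ver[1] >= SERVICE_TRANSACTIONS_VERSION.1))
}

fn main() {
	assert!(!accepts_service_transaction("Geth"));          // not a Parity client
	assert!(!accepts_service_transaction("Parity/v1.5"));   // too old
	assert!(accepts_service_transaction("Parity/v1.6"));
	assert!(accepts_service_transaction("Parity/v1.7.3-ABCDEFGH"));
}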
@ -2351,7 +2410,12 @@ mod tests {

	fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync {
		let mut sync = ChainSync::new(SyncConfig::default(), client);
-		sync.peers.insert(0,
+		insert_dummy_peer(&mut sync, 0, peer_latest_hash);
+		sync
+	}
+
+	fn insert_dummy_peer(sync: &mut ChainSync, peer_id: PeerId, peer_latest_hash: H256) {
+		sync.peers.insert(peer_id,
			PeerInfo {
				protocol_version: 0,
				genesis: H256::zero(),
@ -2370,7 +2434,7 @@ mod tests {
				asking_snapshot_data: None,
				block_set: None,
			});
-		sync
	}

	#[test]
@ -2622,6 +2686,79 @@ mod tests {
		assert_eq!(stats.len(), 1, "Should maintain stats for single transaction.")
	}

+	#[test]
+	fn should_propagate_service_transaction_to_selected_peers_only() {
+		let mut client = TestBlockChainClient::new();
+		client.insert_transaction_with_gas_price_to_queue(U256::zero());
+		let block_hash = client.block_hash_delta_minus(1);
+		let mut sync = ChainSync::new(SyncConfig::default(), &client);
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		// when peer#1 is Geth
+		insert_dummy_peer(&mut sync, 1, block_hash);
+		io.peers_info.insert(1, "Geth".to_owned());
+		// and peer#2 is Parity, accepting service transactions
+		insert_dummy_peer(&mut sync, 2, block_hash);
+		io.peers_info.insert(2, "Parity/v1.6".to_owned());
+		// and peer#3 is Parity, discarding service transactions
+		insert_dummy_peer(&mut sync, 3, block_hash);
+		io.peers_info.insert(3, "Parity/v1.5".to_owned());
+		// and peer#4 is Parity, accepting service transactions
+		insert_dummy_peer(&mut sync, 4, block_hash);
+		io.peers_info.insert(4, "Parity/v1.7.3-ABCDEFGH".to_owned());
+
+		// and new service transaction is propagated to peers
+		sync.propagate_new_transactions(&mut io);
+
+		// peer#2 && peer#4 are receiving service transaction
+		assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET
+		assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 4)); // TRANSACTIONS_PACKET
+		assert_eq!(io.packets.len(), 2);
+	}
+
+	#[test]
+	fn should_propagate_service_transaction_is_sent_as_separate_message() {
+		let mut client = TestBlockChainClient::new();
+		let tx1_hash = client.insert_transaction_to_queue();
+		let tx2_hash = client.insert_transaction_with_gas_price_to_queue(U256::zero());
+		let block_hash = client.block_hash_delta_minus(1);
+		let mut sync = ChainSync::new(SyncConfig::default(), &client);
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		// when peer#1 is Parity, accepting service transactions
+		insert_dummy_peer(&mut sync, 1, block_hash);
+		io.peers_info.insert(1, "Parity/v1.6".to_owned());
+
+		// and service + non-service transactions are propagated to peers
+		sync.propagate_new_transactions(&mut io);
+
+		// two separate packets for peer are queued:
+		// 1) with non-service-transaction
+		// 2) with service transaction
+		let sent_transactions: Vec<UnverifiedTransaction> = io.packets.iter()
+			.filter_map(|p| {
+				if p.packet_id != 0x02 || p.recipient != 1 { // TRANSACTIONS_PACKET
+					return None;
+				}
+
+				let rlp = UntrustedRlp::new(&*p.data);
+				let item_count = rlp.item_count();
+				if item_count != 1 {
+					return None;
+				}
+
+				rlp.at(0).ok().and_then(|r| r.as_val().ok())
+			})
+			.collect();
+		assert_eq!(sent_transactions.len(), 2);
+		assert!(sent_transactions.iter().any(|tx| tx.hash() == tx1_hash));
+		assert!(sent_transactions.iter().any(|tx| tx.hash() == tx2_hash));
+	}
+
	#[test]
	fn handles_peer_new_block_malformed() {
		let mut client = TestBlockChainClient::new();
@ -50,6 +50,7 @@ pub struct TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
	pub sender: Option<PeerId>,
	pub to_disconnect: HashSet<PeerId>,
	pub packets: Vec<TestPacket>,
+	pub peers_info: HashMap<PeerId, String>,
	overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}

@ -63,6 +64,7 @@ impl<'p, C> TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
			to_disconnect: HashSet::new(),
			overlay: RwLock::new(HashMap::new()),
			packets: Vec::new(),
+			peers_info: HashMap::new(),
		}
	}
}
@ -112,6 +114,12 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
		&*self.chain
	}

+	fn peer_info(&self, peer_id: PeerId) -> String {
+		self.peers_info.get(&peer_id)
+			.cloned()
+			.unwrap_or_else(|| peer_id.to_string())
+	}
+
	fn snapshot_service(&self) -> &SnapshotService {
		self.snapshot_service
	}