gregg dourgarian 2016-07-19 13:42:23 -05:00 committed by Arkadiy Paronyan
parent 861c8d0701
commit d67369a01c
13 changed files with 20 additions and 20 deletions

View File

@@ -38,7 +38,7 @@ pub enum BlockLocation {
/// It's not a part of the canon chain.
Branch,
/// It's part of the fork which should become canon chain,
- /// because it's total difficulty is higher than current
+ /// because its total difficulty is higher than current
/// canon chain difficulty.
BranchBecomingCanonChain(BranchBecomingCanonChainData),
}
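For intuition, a minimal sketch of the total-difficulty comparison the comment above describes; the u128 totals and the function name are hypothetical stand-ins, not part of this codebase:

// Sketch only: u128 totals and these names are hypothetical stand-ins.
fn becomes_canon(fork_total_difficulty: u128, canon_total_difficulty: u128) -> bool {
    // The fork should replace the current canon chain only when its
    // accumulated difficulty is strictly higher.
    fork_total_difficulty > canon_total_difficulty
}

fn main() {
    assert!(becomes_canon(1_001, 1_000));
    assert!(!becomes_canon(999, 1_000));
}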

View File

@@ -1002,7 +1002,7 @@ impl MiningBlockChainClient for Client {
self.trie_factory.clone(),
false, // TODO: this will need to be parameterised once we want to do immediate mining insertion.
self.state_db.lock().boxed_clone(),
- &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"),
+ &self.chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"),
self.build_last_hashes(h.clone()),
author,
gas_range_target,

View File

@@ -122,7 +122,7 @@ impl Ord for TransactionOrigin {
}
#[derive(Clone, Debug)]
- /// Light structure used to identify transaction and it's order
+ /// Light structure used to identify transaction and its order
struct TransactionOrder {
/// Primary ordering factory. Difference between transaction nonce and expected nonce in state
/// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5)

View File

@@ -50,7 +50,7 @@ pub fn expand_serialization_implementation(
let impl_item = match serialize_item(cx, &builder, &item) {
Ok(item) => item,
Err(Error) => {
- // An error occured, but it should have been reported already.
+ // An error occurred, but it should have been reported already.
return;
},
};

View File

@@ -107,7 +107,7 @@ pub fn init_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError
})
}
- /// Error occured while establising socket or endpoint
+ /// Error occurred while establising socket or endpoint
#[derive(Debug)]
pub enum SocketError {
/// Error establising duplex (paired) socket and/or endpoint

View File

@@ -36,7 +36,7 @@ Operating Options:
--mode MODE Set the operating mode. MODE can be one of:
active - Parity continuously syncs the chain.
passive - Parity syncs initially, then sleeps and
wakes regularly to resync.
dark - Parity syncs only when an external interface
is active. [default: active].
--mode-timeout SECS Specify the number of seconds before inactivity
@@ -141,11 +141,11 @@ Sealing/Mining Options:
own - reseal only on a new local transaction;
ext - reseal only on a new external transaction;
all - reseal on all new transactions [default: own].
--reseal-min-period MS Specify the minimum time between reseals from
incoming transactions. MS is time measured in
milliseconds [default: 2000].
--work-queue-size ITEMS Specify the number of historical work packages
which are kept cached lest a solution is found for
them later. High values take more memory but result
in fewer unusable solutions [default: 20].
--tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas
@@ -196,7 +196,7 @@ Footprint Options:
fast - maintain journal overlay. Fast but 50MB used.
auto - use the method most recently synced or
default to fast if none synced [default: auto].
- --cache-pref-size BYTES Specify the prefered size of the blockchain cache in
+ --cache-pref-size BYTES Specify the preferred size of the blockchain cache in
bytes [default: 16384].
--cache-max-size BYTES Specify the maximum size of the blockchain cache in
bytes [default: 262144].
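A rough usage sketch, assuming the standard parity binary name and purely illustrative values for the options documented above:

parity --mode passive --mode-timeout 300 --reseal-min-period 2000 --cache-pref-size 16384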

View File

@@ -158,7 +158,7 @@ fn transaction_error(error: EthcoreError) -> Error {
"Transaction fee is too low. There is another transaction with same nonce in the queue. Try increasing the fee or incrementing the nonce.".into()
},
LimitReached => {
- "There is too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee.".into()
+ "There are too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee.".into()
},
InsufficientGasPrice { minimal, got } => {
format!("Transaction fee is too low. It does not satisfy your node's minimal fee (minimal: {}, got: {}). Try increasing the fee.", minimal, got)

View File

@@ -83,7 +83,7 @@ pub trait Eth: Sized + Send + Sync + 'static {
/// Estimate gas needed for execution of given contract.
fn estimate_gas(&self, _: Params) -> Result<Value, Error>;
- /// Get transaction by it's hash.
+ /// Get transaction by its hash.
fn transaction_by_hash(&self, _: Params) -> Result<Value, Error>;
/// Returns transaction at given block hash and index.

View File

@@ -116,7 +116,7 @@ impl Server {
io
)),
Err(any_error) => die(format!(
- "Signer: Unknown error occured when starting Signer. Details: {:?}",
+ "Signer: Unknown error occurred when starting Signer. Details: {:?}",
any_error
)),
Ok(server) => server,

View File

@@ -689,7 +689,7 @@ impl ChainSync {
self.state = SyncState::Waiting;
}
- /// Find something to do for a peer. Called for a new peer or when a peer is done with it's task.
+ /// Find something to do for a peer. Called for a new peer or when a peer is done with its task.
fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) {
if !self.active_peers.contains(&peer_id) {
trace!(target: "sync", "Skipping deactivated peer");

View File

@@ -201,7 +201,7 @@ impl<'a, 'view> View<'a, 'view> for UntrustedRlp<'a> where 'a: 'view {
return Err(DecoderError::RlpExpectedToBeList);
}
- // move to cached position if it's index is less or equal to
+ // move to cached position if its index is less or equal to
// current search index, otherwise move to beginning of list
let c = self.offset_cache.get();
let (mut bytes, to_skip) = match c.index <= index {
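A minimal sketch of the cache decision that comment describes; the Cached struct, the function name, and the returned tuple shape are hypothetical stand-ins, and the real offset_cache differs in detail:

// Sketch only: Cached and start_point are hypothetical stand-ins.
#[derive(Clone, Copy)]
struct Cached { index: usize, offset: usize }

// Decide where to start scanning for `index`: resume from the cached
// position when it is at or before the target, otherwise restart at 0.
fn start_point(cache: Cached, index: usize) -> (usize, usize) {
    if cache.index <= index {
        (cache.offset, index - cache.index) // byte offset, items still to skip
    } else {
        (0, index)
    }
}

fn main() {
    let c = Cached { index: 2, offset: 10 };
    assert_eq!(start_point(c, 5), (10, 3)); // resume from cache, skip 3 more items
    assert_eq!(start_point(c, 1), (0, 1));  // target precedes cache, restart
}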
@@ -334,9 +334,9 @@ impl<'a> BasicDecoder<'a> {
/// Return first item info.
fn payload_info(bytes: &[u8]) -> Result<PayloadInfo, DecoderError> {
let item = try!(PayloadInfo::from(bytes));
match item.header_len.checked_add(item.value_len) {
Some(x) if x <= bytes.len() => Ok(item),
_ => Err(DecoderError::RlpIsTooShort),
}
}
}
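The hunk above covers payload_info, which guards its length check with checked_add: if header_len + value_len could wrap around, a corrupt length field might otherwise pass the bounds comparison. A minimal standalone sketch of the same guard, with hypothetical Payload and TooShort types standing in for the crate's own:

// Sketch only: Payload and TooShort are stand-ins, not types from this codebase.
struct Payload { header_len: usize, value_len: usize }

struct TooShort;

fn check_fits(item: &Payload, buf: &[u8]) -> Result<(), TooShort> {
    // checked_add returns None on overflow, so a huge value_len cannot wrap
    // around usize and slip past the buffer-length comparison.
    match item.header_len.checked_add(item.value_len) {
        Some(total) if total <= buf.len() => Ok(()),
        _ => Err(TooShort),
    }
}

fn main() {
    let buf = [0u8; 4];
    assert!(check_fits(&Payload { header_len: 1, value_len: 3 }, &buf).is_ok());
    // An overflowing pair of lengths is rejected instead of wrapping to a small sum.
    assert!(check_fits(&Payload { header_len: 2, value_len: usize::MAX }, &buf).is_err());
}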

View File

@@ -84,7 +84,7 @@ impl<'db> TrieDB<'db> {
ret
}
- /// Convert a vector of hashes to a hashmap of hash to occurances.
+ /// Convert a vector of hashes to a hashmap of hash to occurrences.
pub fn to_map(hashes: Vec<H256>) -> HashMap<H256, u32> {
let mut r: HashMap<H256, u32> = HashMap::new();
for h in hashes.into_iter() {
@@ -93,7 +93,7 @@ impl<'db> TrieDB<'db> {
r
}
- /// Determine occurances of items in the backing database which are not related to this
+ /// Determine occurrences of items in the backing database which are not related to this
/// trie.
pub fn db_items_remaining(&self) -> HashMap<H256, i32> {
let mut ret = self.db.keys();
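The to_map helper touched in the hunks above converts a vector of hashes into a hash-to-occurrence-count map. A minimal sketch of that counting idiom, assuming plain u64 keys in place of H256:

// Sketch only: u64 stands in for the H256 key type used by the real code.
use std::collections::HashMap;

fn to_count_map(hashes: Vec<u64>) -> HashMap<u64, u32> {
    let mut counts: HashMap<u64, u32> = HashMap::new();
    for h in hashes {
        // Insert a zero on first sight of the key, then bump the count.
        *counts.entry(h).or_insert(0) += 1;
    }
    counts
}

fn main() {
    let counts = to_count_map(vec![1, 2, 2, 3, 2]);
    assert_eq!(counts[&2], 3);
    assert_eq!(counts.get(&4), None);
}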

View File

@@ -255,7 +255,7 @@ fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStre
begin += len;
}
- // if fist key len is equal prefix, append it's value
+ // if fist key len is equal prefix, append its value
match pre_len == key.len() {
true => { stream.append(&value); },
false => { stream.append_empty_data(); }