Fetch `parity-common` crates from crates.io (#9410)
* Fetch `parity-common` crates from crates.io
* Add doc tests from `patricia-trie` to `patricia-trie-ethereum`
* Fix/update a few deps
* [ethkey] upgrade ethereum-types
* [whisper] update deps
* [network] deps
* [network-devp2p] deps
* [journaldb] deps
* [fastmap] deps
* [miner] deps and test fixes
* [machine] deps
* [json] deps
* [hw] deps
* [ethash] deps
* [registrar] deps
* Update a few more dependencies with new ethabi-*
* [updater] Update deps
* deps
* [ethcore] Update deps
* Use new parity-snappy and parity-rocksdb crates
* Updated submodules
* Use parity-snappy 0.1
* Use kvdb-rocksdb 0.1.2
* Don't use latest ethereum/tests
* Fix merge conflict errors
* Remove superseded comment
* Address grumbles: add newlines, add/remove spaces
parent 51eac1926f
commit 72fd1fa58d
Cargo.lock (generated): 2302 lines changed; file diff suppressed because it is too large.
Cargo.toml (13 lines changed)
@@ -33,7 +33,7 @@ fdlimit = "0.1"
 ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
 jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
 ethcore = { path = "ethcore", features = ["parity"] }
-parity-bytes = { git = "https://github.com/paritytech/parity-common" }
+parity-bytes = "0.1"
 ethcore-io = { path = "util/io" }
 ethcore-light = { path = "ethcore/light" }
 ethcore-logger = { path = "logger" }
@@ -43,10 +43,10 @@ ethcore-private-tx = { path = "ethcore/private-tx" }
 ethcore-service = { path = "ethcore/service" }
 ethcore-sync = { path = "ethcore/sync" }
 ethcore-transaction = { path = "ethcore/transaction" }
-ethereum-types = "0.3"
+ethereum-types = "0.4"
 node-filter = { path = "ethcore/node_filter" }
 ethkey = { path = "ethkey" }
-rlp = { git = "https://github.com/paritytech/parity-common" }
+rlp = { version = "0.2.4", features = ["ethereum"] }
 rpc-cli = { path = "rpc_cli" }
 parity-hash-fetch = { path = "hash-fetch" }
 parity-ipfs-api = { path = "ipfs" }
@@ -57,12 +57,12 @@ parity-rpc-client = { path = "rpc_client" }
 parity-updater = { path = "updater" }
 parity-version = { path = "util/version" }
 parity-whisper = { path = "whisper" }
-path = { git = "https://github.com/paritytech/parity-common" }
+parity-path = "0.1"
 dir = { path = "util/dir" }
 panic_hook = { path = "util/panic_hook" }
-keccak-hash = { git = "https://github.com/paritytech/parity-common" }
+keccak-hash = "0.1"
 migration-rocksdb = { path = "util/migration-rocksdb" }
-kvdb = "0.1.0"
+kvdb = "0.1"
 kvdb-rocksdb = "0.1.3"
 journaldb = { path = "util/journaldb" }
 mem = { path = "util/mem" }
@@ -130,7 +130,6 @@ members = [
 "evmbin",
 "miner",
 "parity-clib",
-"transaction-pool",
 "whisper",
 "whisper/cli",
 "util/triehash-ethereum",
@@ -6,8 +6,8 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 crunchy = "0.1.0"
 either = "1.0.0"
-ethereum-types = "0.3"
-keccak-hash = { git = "https://github.com/paritytech/parity-common" }
+ethereum-types = "0.4"
+keccak-hash = "0.1"
 log = "0.4"
 memmap = "0.6"
 parking_lot = "0.6"
@@ -15,23 +15,23 @@ common-types = { path = "types" }
 crossbeam = "0.3"
 ethash = { path = "../ethash" }
 ethcore-bloom-journal = { path = "../util/bloom" }
-parity-bytes = { git = "https://github.com/paritytech/parity-common" }
-hashdb = { git = "https://github.com/paritytech/parity-common" }
-memorydb = { git = "https://github.com/paritytech/parity-common" }
-patricia-trie = { git = "https://github.com/paritytech/parity-common" }
+parity-bytes = "0.1"
+hashdb = "0.2.1"
+memorydb = "0.2.1"
+patricia-trie = "0.2.1"
 patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" }
-parity-crypto = { git = "https://github.com/paritytech/parity-common" }
+parity-crypto = "0.1"
 error-chain = { version = "0.12", default-features = false }
 ethcore-io = { path = "../util/io" }
 ethcore-logger = { path = "../logger" }
 ethcore-miner = { path = "../miner" }
 ethcore-stratum = { path = "./stratum", optional = true }
 ethcore-transaction = { path = "./transaction" }
-ethereum-types = "0.3"
+ethereum-types = "0.4"
 memory-cache = { path = "../util/memory_cache" }
-ethabi = "5.1"
-ethabi-derive = "5.0"
-ethabi-contract = "5.0"
+ethabi = "5.1.2"
+ethabi-derive = "5.1.3"
+ethabi-contract = "5.1.1"
 ethjson = { path = "../json" }
 ethkey = { path = "../ethkey" }
 ethstore = { path = "../ethstore" }
@@ -47,21 +47,21 @@ parity-machine = { path = "../machine" }
 parking_lot = "0.6"
 rayon = "1.0"
 rand = "0.4"
-rlp = { git = "https://github.com/paritytech/parity-common" }
+rlp = { version = "0.2.4", features = ["ethereum"] }
 rlp_compress = { path = "../util/rlp_compress" }
 rlp_derive = { path = "../util/rlp_derive" }
-kvdb = "0.1.0"
-kvdb-memorydb = "0.1.0"
-parity-snappy = "0.1.0"
+kvdb = "0.1"
+kvdb-memorydb = "0.1"
+parity-snappy = "0.1"
 stop-guard = { path = "../util/stop-guard" }
 macros = { path = "../util/macros" }
 rustc-hex = "1.0"
 stats = { path = "../util/stats" }
-trace-time = { path = "../util/trace-time" }
+trace-time = "0.1"
 using_queue = { path = "../util/using_queue" }
 vm = { path = "vm" }
 wasm = { path = "wasm" }
-keccak-hash = { git = "https://github.com/paritytech/parity-common" }
+keccak-hash = "0.1"
 triehash-ethereum = { version = "0.2", path = "../util/triehash-ethereum" }
 unexpected = { path = "../util/unexpected" }
 journaldb = { path = "../util/journaldb" }
@@ -77,7 +77,7 @@ fake-hardware-wallet = { path = "../util/fake-hardware-wallet" }

 [dev-dependencies]
 tempdir = "0.3"
-trie-standardmap = { git = "https://github.com/paritytech/parity-common" }
+trie-standardmap = "0.1"

 [features]
 parity = ["work-notify", "price-info", "stratum"]
@@ -5,13 +5,13 @@ authors = ["Parity Technologies <admin@parity.io>"]

 [dependencies]
 bit-set = "0.4"
-parity-bytes = { git = "https://github.com/paritytech/parity-common" }
-ethereum-types = "0.3"
+parity-bytes = "0.1"
+ethereum-types = "0.4"
 heapsize = "0.4"
 lazy_static = "1.0"
 log = "0.4"
 vm = { path = "../vm" }
-keccak-hash = { git = "https://github.com/paritytech/parity-common" }
+keccak-hash = "0.1"
 parking_lot = "0.6"
 memory-cache = { path = "../../util/memory_cache" }
@@ -795,7 +795,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 TWO_POW_96 => a >> 96,
 TWO_POW_224 => a >> 224,
 TWO_POW_248 => a >> 248,
-_ => a.overflowing_div(b).0,
+_ => a / b,
 }
 } else {
 U256::zero()
@@ -805,7 +805,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 let a = self.stack.pop_back();
 let b = self.stack.pop_back();
 self.stack.push(if !b.is_zero() {
-a.overflowing_rem(b).0
+a % b
 } else {
 U256::zero()
 });
@@ -821,7 +821,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 } else if a == min && b == !U256::zero() {
 min
 } else {
-let c = a.overflowing_div(b).0;
+let c = a / b;
 set_sign(c, sign_a ^ sign_b)
 });
 },
@@ -832,7 +832,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 let b = get_and_reset_sign(ub).0;

 self.stack.push(if !b.is_zero() {
-let c = a.overflowing_rem(b).0;
+let c = a % b;
 set_sign(c, sign_a)
 } else {
 U256::zero()
@@ -920,7 +920,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 // upcast to 512
 let a5 = U512::from(a);
 let res = a5.overflowing_add(U512::from(b)).0;
-let x = res.overflowing_rem(U512::from(c)).0;
+let x = res % U512::from(c);
 U256::from(x)
 } else {
 U256::zero()
@@ -934,7 +934,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 self.stack.push(if !c.is_zero() {
 let a5 = U512::from(a);
 let res = a5.overflowing_mul(U512::from(b)).0;
-let x = res.overflowing_rem(U512::from(c)).0;
+let x = res % U512::from(c);
 U256::from(x)
 } else {
 U256::zero()
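Note: the interpreter hunks above replace `a.overflowing_div(b).0` and `overflowing_rem` with the plain `/` and `%` operators. For unsigned 256-bit integers the two are equivalent whenever the divisor is non-zero (unsigned division cannot overflow), and every call site keeps its `!b.is_zero()` or `!c.is_zero()` guard. A minimal standalone sketch of the migrated operator style, assuming the ethereum-types 0.4 `U256` this commit moves to:

extern crate ethereum_types;

use ethereum_types::U256;

// Division guarded against zero, mirroring the interpreter's `!b.is_zero()` check.
// Unsigned division cannot overflow, so for non-zero `b` this returns the same
// value the old `a.overflowing_div(b).0` did.
fn checked_div(a: U256, b: U256) -> U256 {
    if !b.is_zero() {
        a / b
    } else {
        U256::zero()
    }
}

fn main() {
    let a = U256::from(100u64);
    let b = U256::from(7u64);
    assert_eq!(checked_div(a, b), U256::from(14u64));
    assert_eq!(checked_div(a, U256::zero()), U256::zero());
}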
@ -9,19 +9,19 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
[dependencies]
|
||||
log = "0.4"
|
||||
ethcore = { path = ".."}
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-bytes = "0.1"
|
||||
ethcore-transaction = { path = "../transaction" }
|
||||
ethereum-types = "0.3"
|
||||
memorydb = { git = "https://github.com/paritytech/parity-common" }
|
||||
patricia-trie = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.4"
|
||||
memorydb = "0.2.1"
|
||||
patricia-trie = "0.2.1"
|
||||
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
|
||||
ethcore-network = { path = "../../util/network" }
|
||||
ethcore-io = { path = "../../util/io" }
|
||||
hashdb = { git = "https://github.com/paritytech/parity-common" }
|
||||
hashdb = "0.2.1"
|
||||
heapsize = "0.4"
|
||||
vm = { path = "../vm" }
|
||||
fastmap = { path = "../../util/fastmap" }
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
rlp_derive = { path = "../../util/rlp_derive" }
|
||||
smallvec = "0.4"
|
||||
futures = "0.1"
|
||||
@ -32,16 +32,16 @@ serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
parking_lot = "0.6"
|
||||
stats = { path = "../../util/stats" }
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1"
|
||||
keccak-hasher = { path = "../../util/keccak-hasher" }
|
||||
triehash-ethereum = { version = "0.2", path = "../../util/triehash-ethereum" }
|
||||
kvdb = "0.1.0"
|
||||
kvdb = "0.1"
|
||||
memory-cache = { path = "../../util/memory_cache" }
|
||||
error-chain = { version = "0.12", default-features = false }
|
||||
|
||||
[dev-dependencies]
|
||||
ethcore = { path = "..", features = ["test-helpers"] }
|
||||
kvdb-memorydb = "0.1.0"
|
||||
kvdb-memorydb = "0.1"
|
||||
tempdir = "0.3"
|
||||
|
||||
[features]
|
||||
|
@@ -95,7 +95,7 @@ impl AccountTransactions {
 }

 fn next_nonce(&self) -> U256 {
-self.current.last().map(|last| last.nonce + 1.into())
+self.current.last().map(|last| last.nonce + 1)
 .unwrap_or_else(|| *self.cur_nonce.value())
 }

@@ -113,7 +113,7 @@ impl AccountTransactions {
 None => break,
 }

-next_nonce = next_nonce + 1.into();
+next_nonce = next_nonce + 1;
 }

 promoted
@@ -196,7 +196,7 @@ impl TransactionQueue {
 }
 Err(idx) => {
 let cur_len = acct_txs.current.len();
-let incr_nonce = nonce + 1.into();
+let incr_nonce = nonce + 1;

 // current is sorted with one tx per nonce,
 // so if a tx with given nonce wasn't found that means it is either
@@ -215,7 +215,7 @@ impl TransactionQueue {
 }

 (ImportDestination::Current, vec![hash])
-} else if idx == cur_len && acct_txs.current.last().map_or(false, |f| f.nonce + 1.into() != nonce) {
+} else if idx == cur_len && acct_txs.current.last().map_or(false, |f| f.nonce + 1 != nonce) {
 trace!(target: "txqueue", "Queued future transaction for {}, nonce={}", sender, nonce);
 let future_nonce = nonce;
 acct_txs.future.insert(future_nonce, tx_info);
@@ -535,7 +535,7 @@ mod tests {
 let tx_b: PendingTransaction = Transaction::default().fake_sign(sender).into();
 let tx_a: PendingTransaction = {
 let mut tx_a = Transaction::default();
-tx_a.gas_price = tx_b.gas_price + 1.into();
+tx_a.gas_price = tx_b.gas_price + 1;
 tx_a.fake_sign(sender).into()
 };
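Note: the `+ 1.into()` to `+ 1` rewrites in this queue (and in later hunks) lean on the upgraded `U256` accepting primitive integer operands directly, so the explicit conversion is no longer needed. A small sketch of the nonce bump under that assumption, with illustrative values only:

extern crate ethereum_types;

use ethereum_types::U256;

fn main() {
    // Illustrative nonce, not taken from the source.
    let nonce = U256::from(41u64);
    // With the upgraded ethereum-types, a primitive literal can sit on the
    // right-hand side, so `nonce + 1` replaces the old `nonce + 1.into()`.
    let next = nonce + 1;
    assert_eq!(next, U256::from(42u64));
}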
@ -10,16 +10,16 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
ethcore = { path = ".."}
|
||||
ethcore-network = { path = "../../util/network" }
|
||||
ethcore-network-devp2p = { path = "../../util/network-devp2p" }
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
log = "0.4"
|
||||
parking_lot = "0.6"
|
||||
ethabi = "5.1"
|
||||
ethabi-derive = "5.0"
|
||||
ethabi-contract = "5.0"
|
||||
ethabi = "5.1.2"
|
||||
ethabi-derive = "5.1.3"
|
||||
ethabi-contract = "5.1.1"
|
||||
lru-cache = "0.1"
|
||||
|
||||
[dev-dependencies]
|
||||
ethcore = { path = "..", features = ["test-helpers"] }
|
||||
kvdb-memorydb = "0.1.0"
|
||||
kvdb-memorydb = "0.1"
|
||||
ethcore-io = { path = "../../util/io" }
|
||||
tempdir = "0.3"
|
||||
|
@ -7,36 +7,36 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
error-chain = { version = "0.12", default-features = false }
|
||||
ethabi = "5.1"
|
||||
ethabi-contract = "5.0"
|
||||
ethabi-derive = "5.0"
|
||||
ethabi = "5.1.2"
|
||||
ethabi-derive = "5.1.3"
|
||||
ethabi-contract = "5.1.1"
|
||||
ethcore = { path = ".." }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-bytes = "0.1"
|
||||
parity-crypto = "0.1"
|
||||
ethcore-io = { path = "../../util/io" }
|
||||
ethcore-logger = { path = "../../logger" }
|
||||
ethcore-miner = { path = "../../miner" }
|
||||
ethcore-transaction = { path = "../transaction" }
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
ethjson = { path = "../../json" }
|
||||
ethkey = { path = "../../ethkey" }
|
||||
fetch = { path = "../../util/fetch" }
|
||||
futures = "0.1"
|
||||
heapsize = "0.4"
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1.2"
|
||||
log = "0.4"
|
||||
parking_lot = "0.6"
|
||||
patricia-trie = { git = "https://github.com/paritytech/parity-common" }
|
||||
patricia-trie = "0.2.1"
|
||||
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
|
||||
rand = "0.3"
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
rlp_derive = { path = "../../util/rlp_derive" }
|
||||
rustc-hex = "1.0"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
tiny-keccak = "1.4"
|
||||
transaction-pool = { path = "../../transaction-pool" }
|
||||
transaction-pool = "1.13.2"
|
||||
url = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -1 +1 @@
-Subproject commit 0edbf860ff7ed4b6b6336097ba44836e8c6482dd
+Subproject commit d17bfb6962041c4ac7f82eb79f72eef8d42f9447
@ -10,11 +10,11 @@ ethcore = { path = ".." }
|
||||
ethcore-io = { path = "../../util/io" }
|
||||
ethcore-private-tx = { path = "../private-tx" }
|
||||
ethcore-sync = { path = "../sync" }
|
||||
ethereum-types = "0.3"
|
||||
kvdb = "0.1.0"
|
||||
ethereum-types = "0.4"
|
||||
kvdb = "0.1"
|
||||
log = "0.4"
|
||||
stop-guard = { path = "../../util/stop-guard" }
|
||||
trace-time = { path = "../../util/trace-time" }
|
||||
trace-time = "0.1"
|
||||
|
||||
[dev-dependencies]
|
||||
ethcore = { path = "..", features = ["test-helpers"] }
|
||||
|
@@ -1508,7 +1508,7 @@ impl Call for Client {
 where F: FnMut(U256) -> Result<bool, E>
 {
 while upper - lower > 1.into() {
-let mid = (lower + upper) / 2.into();
+let mid = (lower + upper) / 2;
 trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper);
 let c = cond(mid)?;
 match c {
@@ -2510,7 +2510,7 @@ mod tests {
 block_hash: block_hash,
 block_number: block_number,
 cumulative_gas_used: gas_used,
-gas_used: gas_used - 5.into(),
+gas_used: gas_used - 5,
 contract_address: None,
 logs: vec![LocalizedLogEntry {
 entry: logs[0].clone(),
@ -1919,7 +1919,7 @@ mod tests {
|
||||
let b2 = b2.close_and_lock().unwrap();
|
||||
|
||||
// the spec sets the block reward to 10
|
||||
assert_eq!(b2.block().state().balance(&addr1).unwrap(), addr1_balance + (10 * 2).into())
|
||||
assert_eq!(b2.block().state().balance(&addr1).unwrap(), addr1_balance + (10 * 2))
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -2067,7 +2067,7 @@ mod tests {
|
||||
// the contract rewards (1000 + kind) for each benefactor/reward kind
|
||||
assert_eq!(
|
||||
b2.block().state().balance(&addr1).unwrap(),
|
||||
addr1_balance + (1000 + 0).into() + (1000 + 2).into(),
|
||||
addr1_balance + (1000 + 0) + (1000 + 2),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@@ -454,8 +454,8 @@ impl Engine<EthereumMachine> for Tendermint {
 fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
 // Chain scoring: total weight is sqrt(U256::max_value())*height - view
 let new_difficulty = U256::from(U128::max_value())
-	+ consensus_view(parent).expect("Header has been verified; qed").into()
-	- self.view.load(AtomicOrdering::SeqCst).into();
+	+ consensus_view(parent).expect("Header has been verified; qed")
+	- self.view.load(AtomicOrdering::SeqCst);

 header.set_difficulty(new_difficulty);
 }
@@ -233,8 +233,8 @@ impl EthereumMachine {
 if let Some(ref ethash_params) = self.ethash_extensions {
 let gas_limit = {
 let bound_divisor = self.params().gas_limit_bound_divisor;
-let lower_limit = gas_limit - gas_limit / bound_divisor + 1.into();
-let upper_limit = gas_limit + gas_limit / bound_divisor - 1.into();
+let lower_limit = gas_limit - gas_limit / bound_divisor + 1;
+let upper_limit = gas_limit + gas_limit / bound_divisor - 1;
 let gas_limit = if gas_limit < gas_floor_target {
 let gas_limit = cmp::min(gas_floor_target, upper_limit);
 round_block_gas_limit(gas_limit, lower_limit, upper_limit)
@@ -245,7 +245,7 @@ impl EthereumMachine {
 let total_lower_limit = cmp::max(lower_limit, gas_floor_target);
 let total_upper_limit = cmp::min(upper_limit, gas_ceil_target);
 let gas_limit = cmp::max(gas_floor_target, cmp::min(total_upper_limit,
-lower_limit + (header.gas_used().clone() * 6u32 / 5.into()) / bound_divisor));
+lower_limit + (header.gas_used().clone() * 6u32 / 5) / bound_divisor));
 round_block_gas_limit(gas_limit, total_lower_limit, total_upper_limit)
 };
 // ensure that we are not violating protocol limits
@@ -265,9 +265,9 @@ impl EthereumMachine {
 header.set_gas_limit({
 let bound_divisor = self.params().gas_limit_bound_divisor;
 if gas_limit < gas_floor_target {
-cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
+cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1)
 } else {
-cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
+cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1)
 }
 });
 }
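For context on the bounds computed in the hunks above (only the `.into()` calls change; the formulas themselves are untouched), here is a small worked example with illustrative numbers, using plain integers instead of `U256`:

fn main() {
    // Illustrative values, not from the source: a parent gas limit and the
    // protocol's gas_limit_bound_divisor (1024 for mainnet-style params).
    let gas_limit: u64 = 8_000_000;
    let bound_divisor: u64 = 1024;

    // Mirrors the bounds in the diff: the child block's gas limit must stay
    // strictly within about 1/1024 of the parent's.
    let lower_limit = gas_limit - gas_limit / bound_divisor + 1;
    let upper_limit = gas_limit + gas_limit / bound_divisor - 1;

    assert!(lower_limit < gas_limit && gas_limit < upper_limit);
    println!("allowed next gas limit: {}..={}", lower_limit, upper_limit);
}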
@@ -852,11 +852,11 @@ impl miner::MinerService for Miner {

 fn sensible_gas_price(&self) -> U256 {
 // 10% above our minimum.
-self.transaction_queue.current_worst_gas_price() * 110u32 / 100.into()
+self.transaction_queue.current_worst_gas_price() * 110u32 / 100
 }

 fn sensible_gas_limit(&self) -> U256 {
-self.params.read().gas_range_target.0 / 5.into()
+self.params.read().gas_range_target.0 / 5
 }

 fn import_external_transactions<C: miner::BlockChainClient>(
@ -131,7 +131,7 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
|
||||
data: Vec::new(),
|
||||
}.sign(&*RICH_SECRET, client.signing_chain_id());
|
||||
|
||||
*nonce = *nonce + 1.into();
|
||||
*nonce = *nonce + 1;
|
||||
vec![transaction]
|
||||
};
|
||||
|
||||
@ -174,7 +174,7 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
|
||||
data,
|
||||
}.sign(&*RICH_SECRET, client.signing_chain_id());
|
||||
|
||||
*nonce = *nonce + 1.into();
|
||||
*nonce = *nonce + 1;
|
||||
vec![transaction]
|
||||
} else {
|
||||
make_useless_transactions()
|
||||
|
@@ -672,7 +672,7 @@ mod tests {
 header.set_uncles_hash(good_uncles_hash.clone());
 check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine));

-header.set_gas_limit(min_gas_limit - From::from(1));
+header.set_gas_limit(min_gas_limit - 1);
 check_fail(basic_test(&create_test_block(&header), engine),
 InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: header.gas_limit().clone() }));

@@ -682,7 +682,7 @@ mod tests {
 RidiculousNumber(OutOfBounds { max: Some(BlockNumber::max_value()), min: None, found: header.number() }));

 header = good.clone();
-let gas_used = header.gas_limit().clone() + 1.into();
+let gas_used = header.gas_limit().clone() + 1;
 header.set_gas_used(gas_used);
 check_fail(basic_test(&create_test_block(&header), engine),
 TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit().clone()), min: None, found: header.gas_used().clone() }));
@ -6,8 +6,8 @@ license = "GPL-3.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
ethereum-types = "0.3"
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.4"
|
||||
keccak-hash = "0.1"
|
||||
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
|
||||
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
|
||||
jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
|
||||
|
@ -9,22 +9,22 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
common-types = { path = "../types" }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-bytes = "0.1"
|
||||
ethcore-network = { path = "../../util/network" }
|
||||
ethcore-network-devp2p = { path = "../../util/network-devp2p" }
|
||||
ethcore-io = { path = "../../util/io" }
|
||||
ethcore-light = { path = "../light" }
|
||||
ethcore-transaction = { path = "../transaction" }
|
||||
ethcore = { path = ".." }
|
||||
ethereum-types = "0.3"
|
||||
hashdb = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.4"
|
||||
hashdb = "0.2.1"
|
||||
fastmap = { path = "../../util/fastmap" }
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
rustc-hex = "1.0"
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1"
|
||||
keccak-hasher = { path = "../../util/keccak-hasher" }
|
||||
triehash-ethereum = {version = "0.2", path = "../../util/triehash-ethereum" }
|
||||
kvdb = "0.1.0"
|
||||
kvdb = "0.1"
|
||||
macros = { path = "../../util/macros" }
|
||||
log = "0.4"
|
||||
env_logger = "0.5"
|
||||
@ -33,12 +33,12 @@ heapsize = "0.4"
|
||||
semver = "0.9"
|
||||
smallvec = { version = "0.4", features = ["heapsizeof"] }
|
||||
parking_lot = "0.6"
|
||||
trace-time = { path = "../../util/trace-time" }
|
||||
trace-time = "0.1"
|
||||
ipnetwork = "0.12.6"
|
||||
|
||||
[dev-dependencies]
|
||||
ethcore-io = { path = "../../util/io", features = ["mio"] }
|
||||
ethkey = { path = "../../ethkey" }
|
||||
kvdb-memorydb = "0.1.0"
|
||||
kvdb-memorydb = "0.1"
|
||||
ethcore-private-tx = { path = "../private-tx" }
|
||||
ethcore = { path = "..", features = ["test-helpers"] }
|
||||
|
@ -9,10 +9,10 @@ ethjson = { path = "../../json" }
|
||||
ethkey = { path = "../../ethkey" }
|
||||
evm = { path = "../evm" }
|
||||
heapsize = "0.4"
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1"
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
unexpected = { path = "../../util/unexpected" }
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
|
||||
[dev-dependencies]
|
||||
rustc-hex= "1.0"
|
||||
|
@ -5,12 +5,12 @@ version = "0.1.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
rlp_derive = { path = "../../util/rlp_derive" }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.3"
|
||||
parity-bytes = "0.1"
|
||||
ethereum-types = "0.4"
|
||||
ethjson = { path = "../../json" }
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1"
|
||||
heapsize = "0.4"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@ -5,12 +5,12 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.0"
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.3"
|
||||
patricia-trie = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-bytes = "0.1"
|
||||
ethereum-types = "0.4"
|
||||
patricia-trie = "0.2.1"
|
||||
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
|
||||
log = "0.4"
|
||||
common-types = { path = "../types" }
|
||||
ethjson = { path = "../../json" }
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
keccak-hash = "0.1"
|
||||
|
@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.0"
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
log = "0.4"
|
||||
parity-wasm = "0.31"
|
||||
libc = "0.2"
|
||||
|
@ -7,7 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
serde = "1"
|
||||
serde_json = "1"
|
||||
serde_derive = "1"
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
ethjson = { path = "../../../json" }
|
||||
vm = { path = "../../vm" }
|
||||
wasm = { path = "../" }
|
||||
|
@ -6,9 +6,9 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
[dependencies]
|
||||
byteorder = "1.0"
|
||||
edit-distance = "2.0"
|
||||
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-crypto = "0.1"
|
||||
eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
lazy_static = "1.0"
|
||||
log = "0.4"
|
||||
mem = { path = "../util/mem" }
|
||||
|
@ -16,8 +16,8 @@ tiny-keccak = "1.4"
|
||||
time = "0.1.34"
|
||||
itertools = "0.5"
|
||||
parking_lot = "0.6"
|
||||
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.3"
|
||||
parity-crypto = "0.1"
|
||||
ethereum-types = "0.4"
|
||||
dir = { path = "../util/dir" }
|
||||
smallvec = "0.4"
|
||||
parity-wordlist = "1.0"
|
||||
|
@ -13,9 +13,9 @@ docopt = "0.8"
|
||||
env_logger = "0.5"
|
||||
ethcore = { path = "../ethcore", features = ["test-helpers", "json-tests"] }
|
||||
ethjson = { path = "../json" }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-bytes = "0.1"
|
||||
ethcore-transaction = { path = "../ethcore/transaction" }
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
evm = { path = "../ethcore/evm" }
|
||||
panic_hook = { path = "../util/panic_hook" }
|
||||
rustc-hex = "1.0"
|
||||
|
@ -15,15 +15,15 @@ mime_guess = "2.0.0-alpha.2"
|
||||
rand = "0.4"
|
||||
rustc-hex = "1.0"
|
||||
fetch = { path = "../util/fetch" }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.3"
|
||||
parity-bytes = "0.1"
|
||||
ethereum-types = "0.4"
|
||||
parity-reactor = { path = "../util/reactor" }
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1"
|
||||
registrar = { path = "../registrar" }
|
||||
|
||||
ethabi = "5.1"
|
||||
ethabi-derive = "5.0"
|
||||
ethabi-contract = "5.0"
|
||||
ethabi = "5.1.2"
|
||||
ethabi-derive = "5.1.3"
|
||||
ethabi-contract = "5.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
hyper = "0.11"
|
||||
|
@ -14,7 +14,7 @@ hidapi = { git = "https://github.com/paritytech/hidapi-rs" }
|
||||
libusb = { git = "https://github.com/paritytech/libusb-rs" }
|
||||
trezor-sys = { git = "https://github.com/paritytech/trezor-sys" }
|
||||
ethkey = { path = "../ethkey" }
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
semver = "0.9"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@ -7,11 +7,11 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
ethcore = { path = "../ethcore" }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.3"
|
||||
parity-bytes = "0.1"
|
||||
ethereum-types = "0.4"
|
||||
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
|
||||
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
cid = "0.2"
|
||||
multihash = "0.7"
|
||||
unicase = "2.0"
|
||||
|
@ -4,7 +4,7 @@ version = "0.1.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
rustc-hex = "1.0"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
|
@ -8,9 +8,9 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
ethcore = { path = "../ethcore" }
|
||||
ethcore-io = { path = "../util/io" }
|
||||
ethcore-transaction = { path = "../ethcore/transaction" }
|
||||
kvdb = "0.1.0"
|
||||
kvdb = "0.1"
|
||||
log = "0.4"
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
@ -18,4 +18,4 @@ serde_json = "1.0"
|
||||
[dev-dependencies]
|
||||
ethcore = { path = "../ethcore", features = ["test-helpers"] }
|
||||
ethkey = { path = "../ethkey" }
|
||||
kvdb-memorydb = "0.1.0"
|
||||
kvdb-memorydb = "0.1"
|
||||
|
@ -5,4 +5,4 @@ description = "Generalization of a state machine for consensus engines"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
|
@ -18,18 +18,18 @@ url = { version = "1", optional = true }
|
||||
ansi_term = "0.10"
|
||||
error-chain = "0.12"
|
||||
ethcore-transaction = { path = "../ethcore/transaction" }
|
||||
ethereum-types = "0.3"
|
||||
ethereum-types = "0.4"
|
||||
futures = "0.1"
|
||||
futures-cpupool = "0.1"
|
||||
heapsize = "0.4"
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1"
|
||||
linked-hash-map = "0.5"
|
||||
log = "0.4"
|
||||
parking_lot = "0.6"
|
||||
price-info = { path = "../price-info", optional = true }
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
trace-time = { path = "../util/trace-time" }
|
||||
transaction-pool = { path = "../transaction-pool" }
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
trace-time = "0.1"
|
||||
transaction-pool = "1.13"
|
||||
|
||||
[dev-dependencies]
|
||||
env_logger = "0.5"
|
||||
|
@ -72,7 +72,7 @@ impl fmt::Display for Status {
|
||||
senders = self.status.senders,
|
||||
mem = self.status.mem_usage / 1024,
|
||||
mem_max = self.limits.max_mem_usage / 1024,
|
||||
gp = self.options.minimal_gas_price / 1_000_000.into(),
|
||||
gp = self.options.minimal_gas_price / 1_000_000,
|
||||
max_gas = cmp::min(self.options.block_gas_limit, self.options.tx_gas_limit),
|
||||
)
|
||||
}
|
||||
@ -468,7 +468,7 @@ impl TransactionQueue {
|
||||
|
||||
self.pool.read().pending_from_sender(state_readiness, address)
|
||||
.last()
|
||||
.map(|tx| tx.signed().nonce + 1.into())
|
||||
.map(|tx| tx.signed().nonce + 1)
|
||||
}
|
||||
|
||||
/// Retrieve a transaction from the pool.
|
||||
|
@@ -95,7 +95,7 @@ impl<C: NonceClient> txpool::Ready<VerifiedTransaction> for State<C> {
 },
 cmp::Ordering::Less => txpool::Readiness::Stale,
 cmp::Ordering::Equal => {
-*nonce = *nonce + 1.into();
+*nonce = *nonce + 1;
 txpool::Readiness::Ready
 },
 }
@@ -159,7 +159,7 @@ impl<C: Fn(&Address) -> Option<U256>> txpool::Ready<VerifiedTransaction> for Opt
 cmp::Ordering::Greater => txpool::Readiness::Future,
 cmp::Ordering::Less => txpool::Readiness::Stale,
 cmp::Ordering::Equal => {
-*nonce = *nonce + 1.into();
+*nonce = *nonce + 1;
 txpool::Readiness::Ready
 },
 }
@ -71,7 +71,7 @@ fn should_return_correct_nonces_when_dropped_because_of_limit() {
|
||||
assert_eq!(txq.status().status.transaction_count, 1);
|
||||
|
||||
// then
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 1.into()));
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 1));
|
||||
|
||||
// when
|
||||
let tx1 = Tx::gas_price(2).signed();
|
||||
@ -123,7 +123,7 @@ fn should_never_drop_local_transactions_from_different_senders() {
|
||||
assert_eq!(txq.status().status.transaction_count, 2);
|
||||
|
||||
// then
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2.into()));
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2));
|
||||
|
||||
// when
|
||||
let tx1 = Tx::gas_price(2).signed();
|
||||
@ -137,7 +137,7 @@ fn should_never_drop_local_transactions_from_different_senders() {
|
||||
assert_eq!(res, vec![Ok(()), Ok(())]);
|
||||
assert_eq!(res2, vec![Ok(()), Ok(())]);
|
||||
assert_eq!(txq.status().status.transaction_count, 6);
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2.into()));
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &sender), Some(nonce + 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -604,7 +604,7 @@ fn should_return_correct_nonce_when_transactions_from_given_address_exist() {
|
||||
txq.import(TestClient::new(), vec![tx.local()]);
|
||||
|
||||
// then
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &from), Some(nonce + 1.into()));
|
||||
assert_eq!(txq.next_nonce(TestClient::new(), &from), Some(nonce + 1 ));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -476,8 +476,8 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
|
||||
(report.blocks_imported * 1000) as u64 / ms,
|
||||
report.transactions_applied,
|
||||
(report.transactions_applied * 1000) as u64 / ms,
|
||||
report.gas_processed / From::from(1_000_000),
|
||||
(report.gas_processed / From::from(ms * 1000)).low_u64(),
|
||||
report.gas_processed / 1_000_000,
|
||||
(report.gas_processed / (ms * 1000)).low_u64(),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
@ -306,7 +306,7 @@ impl<T: InformantData> Informant<T> {
|
||||
format!("{} blk/s {} tx/s {} Mgas/s",
|
||||
paint(Yellow.bold(), format!("{:7.2}", (client_report.blocks_imported * 1000) as f64 / elapsed.as_milliseconds() as f64)),
|
||||
paint(Yellow.bold(), format!("{:6.1}", (client_report.transactions_applied * 1000) as f64 / elapsed.as_milliseconds() as f64)),
|
||||
paint(Yellow.bold(), format!("{:4}", (client_report.gas_processed / From::from(elapsed.as_milliseconds() * 1000)).low_u64()))
|
||||
paint(Yellow.bold(), format!("{:4}", (client_report.gas_processed / (elapsed.as_milliseconds() * 1000)).low_u64()))
|
||||
)
|
||||
} else {
|
||||
format!("{} hdr/s",
|
||||
|
@@ -65,7 +65,7 @@ extern crate parity_rpc;
 extern crate parity_updater as updater;
 extern crate parity_version;
 extern crate parity_whisper;
-extern crate path;
+extern crate parity_path as path;
 extern crate rpc_cli;
 extern crate node_filter;
 extern crate keccak_hash as hash;
@ -7,7 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
futures = "0.1"
|
||||
ethabi = "5.1.0"
|
||||
ethabi-derive = "5.0.5"
|
||||
ethabi-contract = "5.0.3"
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethabi = "5.1.2"
|
||||
ethabi-derive = "5.1.3"
|
||||
ethabi-contract = "5.1.1"
|
||||
keccak-hash = "0.1"
|
||||
|
@@ -52,7 +52,8 @@ impl Registrar {
 };

 let address_fetcher = self.registrar.functions().get_address();
-let id = address_fetcher.input(keccak(key), DNS_A_RECORD);
+let hashed_key: [u8; 32] = keccak(key).into();
+let id = address_fetcher.input(hashed_key, DNS_A_RECORD);

 let future = self.client.call_contract(registrar_address, id).and_then(move |address| {
 address_fetcher.output(&address)
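Note: the registrar hunk above converts the keccak hash into a raw `[u8; 32]` before passing it to the ethabi-generated `get_address` call, which after the upgrade no longer accepts the hash type directly here. A hedged standalone sketch of just that conversion, assuming the `keccak` helper from the keccak-hash crate used in this diff:

extern crate keccak_hash;

fn main() {
    // `keccak` returns an H256; the generated contract function now wants a
    // plain 32-byte array, so the hash is converted first, as in the diff.
    // (Assumes the H256 -> [u8; 32] conversion available in this dependency set.)
    let hashed_key: [u8; 32] = keccak_hash::keccak("some-registrar-key").into();
    assert_eq!(hashed_key.len(), 32);
}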
@ -37,9 +37,9 @@ jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "
|
||||
|
||||
ethash = { path = "../ethash" }
|
||||
ethcore = { path = "../ethcore", features = ["test-helpers"] }
|
||||
parity-bytes = "0.1"
|
||||
parity-crypto = "0.1"
|
||||
fastmap = { path = "../util/fastmap" }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethcore-devtools = { path = "../devtools" }
|
||||
ethcore-io = { path = "../util/io" }
|
||||
ethcore-light = { path = "../ethcore/light" }
|
||||
@ -48,18 +48,18 @@ ethcore-miner = { path = "../miner" }
|
||||
ethcore-private-tx = { path = "../ethcore/private-tx" }
|
||||
ethcore-sync = { path = "../ethcore/sync" }
|
||||
ethcore-transaction = { path = "../ethcore/transaction" }
|
||||
ethereum-types = "0.3.2"
|
||||
ethereum-types = "0.4"
|
||||
|
||||
ethjson = { path = "../json" }
|
||||
ethkey = { path = "../ethkey" }
|
||||
ethstore = { path = "../ethstore" }
|
||||
fetch = { path = "../util/fetch" }
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1.2"
|
||||
parity-reactor = { path = "../util/reactor" }
|
||||
parity-updater = { path = "../updater" }
|
||||
parity-version = { path = "../util/version" }
|
||||
patricia-trie = { git = "https://github.com/paritytech/parity-common" }
|
||||
rlp = { git = "https://github.com/paritytech/parity-common" }
|
||||
patricia-trie = "0.2.1"
|
||||
rlp = { version = "0.2.4", features = ["ethereum"] }
|
||||
stats = { path = "../util/stats" }
|
||||
vm = { path = "../ethcore/vm" }
|
||||
|
||||
@ -73,7 +73,7 @@ fake-hardware-wallet = { path = "../util/fake-hardware-wallet" }
|
||||
ethcore = { path = "../ethcore", features = ["test-helpers"] }
|
||||
ethcore-network = { path = "../util/network" }
|
||||
fake-fetch = { path = "../util/fake-fetch" }
|
||||
kvdb-memorydb = "0.1.0"
|
||||
kvdb-memorydb = "0.1"
|
||||
macros = { path = "../util/macros" }
|
||||
pretty_assertions = "0.1"
|
||||
transaction-pool = { path = "../transaction-pool" }
|
||||
transaction-pool = "1.13"
|
||||
|
@ -346,7 +346,7 @@ impl Dispatcher for LightDispatcher {
|
||||
to: request.to,
|
||||
nonce: request.nonce,
|
||||
gas_price: gas_price,
|
||||
gas: request.gas.unwrap_or_else(|| gas_limit / 3.into()),
|
||||
gas: request.gas.unwrap_or_else(|| gas_limit / 3),
|
||||
value: request.value.unwrap_or_else(|| 0.into()),
|
||||
data: request.data.unwrap_or_else(Vec::new),
|
||||
condition: request.condition,
|
||||
|
@@ -24,7 +24,7 @@ pub fn sign_call(request: CallRequest) -> Result<SignedTransaction, Error> {
 let max_gas = U256::from(50_000_000);
 let gas = match request.gas {
 Some(gas) => gas,
-None => max_gas * 10,
+None => max_gas * 10u32,
 };
 let from = request.from.unwrap_or(0.into());
@@ -104,8 +104,8 @@ impl SenderReservations {
 pub fn reserve_nonce(&mut self, minimal: U256) -> Reserved {
 // Update prospective value
 let dropped = self.dropped.swap(0, atomic::Ordering::SeqCst);
-let prospective_value = cmp::max(minimal, self.prospective_value - dropped.into());
-self.prospective_value = prospective_value + 1.into();
+let prospective_value = cmp::max(minimal, self.prospective_value - dropped);
+self.prospective_value = prospective_value + 1;

 let (next, rx) = oneshot::channel();
 let next = Some(next);
@@ -236,7 +236,7 @@ impl Ready {
 pub fn mark_used(mut self) {
 let next = self.next.take().expect("Nonce can be marked as used only once; qed");
 self.next_sent.store(true, atomic::Ordering::SeqCst);
-next.send(self.value + 1.into()).expect(Self::RECV_PROOF);
+next.send(self.value + 1).expect(Self::RECV_PROOF);
 }
 }
@ -78,7 +78,7 @@ impl TestMinerService {
|
||||
pub fn increment_nonce(&self, address: &Address) {
|
||||
let mut next_nonces = self.next_nonces.write();
|
||||
let nonce = next_nonces.entry(*address).or_insert_with(|| 0.into());
|
||||
*nonce = *nonce + 1.into();
|
||||
*nonce = *nonce + 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -17,4 +17,4 @@ parking_lot = "0.6"
|
||||
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
|
||||
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
|
||||
parity-rpc = { path = "../rpc" }
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
keccak-hash = "0.1"
|
||||
|
@ -24,19 +24,19 @@ tokio-service = "0.1"
|
||||
tokio-proto = "0.1"
|
||||
url = "1.0"
|
||||
ethcore = { path = "../ethcore" }
|
||||
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
|
||||
parity-bytes = "0.1"
|
||||
parity-crypto = "0.1"
|
||||
ethcore-logger = { path = "../logger" }
|
||||
ethcore-sync = { path = "../ethcore/sync" }
|
||||
ethcore-transaction = { path = "../ethcore/transaction" }
|
||||
ethereum-types = "0.3"
|
||||
kvdb = "0.1.0"
|
||||
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
|
||||
ethereum-types = "0.4"
|
||||
kvdb = "0.1"
|
||||
keccak-hash = "0.1"
|
||||
ethkey = { path = "../ethkey" }
|
||||
lazy_static = "1.0"
|
||||
ethabi = "5.1"
|
||||
ethabi-derive = "5.0"
|
||||
ethabi-contract = "5.0"
|
||||
ethabi = "5.1.2"
|
||||
ethabi-derive = "5.1.3"
|
||||
ethabi-contract = "5.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
ethcore = { path = "../ethcore", features = ["test-helpers"] }
|
||||
|
@@ -479,7 +479,7 @@ pub fn serialize_ecdsa_signature(nonce_public: &Public, signature_r: Secret, mut
 let mut signature_v = {
 let nonce_public_x = public_x(nonce_public);
 let nonce_public_y: U256 = public_y(nonce_public).into();
-let nonce_public_y_is_odd = !(nonce_public_y % 2.into()).is_zero();
+let nonce_public_y_is_odd = !(nonce_public_y % 2).is_zero();
 let bit0 = if nonce_public_y_is_odd { 1u8 } else { 0u8 };
 let bit1 = if nonce_public_x != *signature_r { 2u8 } else { 0u8 };
 bit0 | bit1
@@ -487,7 +487,7 @@ pub fn serialize_ecdsa_signature(nonce_public: &Public, signature_r: Secret, mut

 // fix high S
 let curve_order = math::curve_order();
-let curve_order_half = curve_order / 2.into();
+let curve_order_half = curve_order / 2;
 let s_numeric: U256 = (*signature_s).into();
 if s_numeric > curve_order_half {
 let signature_s_hash: H256 = (curve_order - s_numeric).into();
@ -422,7 +422,7 @@ impl<F> Iterator for PendingRequestsIterator<F> where F: Fn(U256) -> Option<(boo
|
||||
}
|
||||
|
||||
let index = self.index.clone();
|
||||
self.index = self.index + 1.into();
|
||||
self.index = self.index + 1;
|
||||
|
||||
(self.read_request)(index)
|
||||
}
|
||||
@ -691,7 +691,7 @@ impl DocumentKeyShadowRetrievalService {
|
||||
for participant in participants {
|
||||
let participant_index = Self::map_key_server_address(client, contract_address, contract, participant.clone())
|
||||
.map_err(|e| format!("Error searching for {} participant: {}", participant, e))?;
|
||||
participants_mask = participants_mask | (U256::one() << participant_index.into());
|
||||
participants_mask = participants_mask | (U256::one() << participant_index);
|
||||
}
|
||||
Ok(contract.functions()
|
||||
.document_key_personal_retrieved()
|
||||
|
@@ -575,8 +575,8 @@ fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, node: &NodeId,
 };

 let server_key_id_value: U256 = server_key_id.into();
-let range_interval = U256::max_value() / total_servers_count.into();
-let range_begin = (range_interval + 1.into()) * this_server_index as u32;
+let range_interval = U256::max_value() / total_servers_count;
+let range_begin = (range_interval + 1) * this_server_index as u32;
 let range_end = range_begin.saturating_add(range_interval);

 server_key_id_value >= range_begin && server_key_id_value <= range_end
@ -1,15 +0,0 @@
|
||||
[package]
|
||||
description = "Generic transaction pool."
|
||||
name = "transaction-pool"
|
||||
version = "1.13.1"
|
||||
license = "GPL-3.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
error-chain = "0.12"
|
||||
log = "0.4"
|
||||
smallvec = "0.4"
|
||||
trace-time = { path = "../util/trace-time", version = "0.1" }
|
||||
|
||||
[dev-dependencies]
|
||||
ethereum-types = "0.3"
|
@ -1,53 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Error chain doesn't let us have generic types.
|
||||
/// So the hashes are converted to debug strings for easy display.
|
||||
type Hash = String;
|
||||
|
||||
error_chain! {
|
||||
errors {
|
||||
/// Transaction is already imported
|
||||
AlreadyImported(hash: Hash) {
|
||||
description("transaction is already in the pool"),
|
||||
display("[{}] already imported", hash)
|
||||
}
|
||||
/// Transaction is too cheap to enter the queue
|
||||
TooCheapToEnter(hash: Hash, min_score: String) {
|
||||
description("the pool is full and transaction is too cheap to replace any transaction"),
|
||||
display("[{}] too cheap to enter the pool. Min score: {}", hash, min_score)
|
||||
}
|
||||
/// Transaction is too cheap to replace existing transaction that occupies the same slot.
|
||||
TooCheapToReplace(old_hash: Hash, hash: Hash) {
|
||||
description("transaction is too cheap to replace existing transaction in the pool"),
|
||||
display("[{}] too cheap to replace: {}", hash, old_hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl PartialEq for ErrorKind {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
use self::ErrorKind::*;
|
||||
|
||||
match (self, other) {
|
||||
(&AlreadyImported(ref h1), &AlreadyImported(ref h2)) => h1 == h2,
|
||||
(&TooCheapToEnter(ref h1, ref s1), &TooCheapToEnter(ref h2, ref s2)) => h1 == h2 && s1 == s2,
|
||||
(&TooCheapToReplace(ref old1, ref new1), &TooCheapToReplace(ref old2, ref new2)) => old1 == old2 && new1 == new2,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
@ -1,124 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Generic Transaction Pool
|
||||
//!
|
||||
//! An extensible and performant implementation of Ethereum Transaction Pool.
|
||||
//! The pool stores ordered, verified transactions according to some pluggable
|
||||
//! `Scoring` implementation.
|
||||
//! The pool also allows you to construct a set of `pending` transactions according
|
||||
//! to some notion of `Readiness` (pluggable).
|
||||
//!
|
||||
//! The pool is generic over transactions and should make no assumptions about them.
|
||||
//! The only thing we can rely on is the `Scoring` that defines:
|
||||
//! - the ordering of transactions from a single sender
|
||||
//! - the priority of the transaction compared to other transactions from different senders
|
||||
//!
|
||||
//! NOTE: the transactions from a single sender are not ordered by priority,
|
||||
//! but still when constructing pending set we always need to maintain the ordering
|
||||
//! (i.e. `txs[1]` always needs to be included after `txs[0]` even if it has higher priority)
|
||||
//!
|
||||
//! ### Design Details
|
||||
//!
|
||||
//! Performance assumptions:
|
||||
//! - Possibility to handle tens of thousands of transactions
|
||||
//! - Fast insertions and replacements `O(per-sender + log(senders))`
|
||||
//! - Reasonably fast removal of stalled transactions `O(per-sender)`
|
||||
//! - Reasonably fast construction of pending set `O(txs * (log(senders) + log(per-sender))`
|
||||
//!
|
||||
//! The removal performance could be improved by trading some memory. Currently `SmallVec` is used
|
||||
//! to store senders transactions, instead we could use `VecDeque` and efficiently `pop_front`
|
||||
//! the best transactions.
|
||||
//!
|
||||
//! The pending set construction and insertion complexity could be reduced by introducing
|
||||
//! a notion of `nonce` - an absolute, numeric ordering of transactions.
|
||||
//! We don't do that because of possible implications of EIP208 where nonce might not be
|
||||
//! explicitly available.
|
||||
//!
|
||||
//! 1. The pool groups transactions from particular sender together
|
||||
//! and stores them ordered by `Scoring` within that group
|
||||
//! i.e. `HashMap<Sender, Vec<Transaction>>`.
|
||||
//! 2. Additionaly we maintain the best and the worst transaction from each sender
|
||||
//! (by `Scoring` not `priority`) ordered by `priority`.
|
||||
//! It means that we can easily identify the best transaction inside the entire pool
|
||||
//! and the worst transaction.
|
||||
//! 3. Whenever new transaction is inserted to the queue:
|
||||
//! - first check all the limits (overall, memory, per-sender)
|
||||
//! - retrieve all transactions from a sender
|
||||
//! - binary search for position to insert the transaction
|
||||
//! - decide if we are replacing existing transaction (3 outcomes: drop, replace, insert)
|
||||
//! - update best and worst transaction from that sender if affected
|
||||
//! 4. Pending List construction:
|
||||
//! - Take the best transaction (by priority) from all senders to the List
|
||||
//! - Replace the transaction with next transaction (by ordering) from that sender (if any)
|
||||
//! - Repeat
|
||||
|
||||
#![warn(missing_docs)]
|
||||
|
||||
extern crate smallvec;
|
||||
extern crate trace_time;
|
||||
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
#[cfg(test)]
|
||||
extern crate ethereum_types;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
mod error;
|
||||
mod listener;
|
||||
mod options;
|
||||
mod pool;
|
||||
mod ready;
|
||||
mod status;
|
||||
mod transactions;
|
||||
mod verifier;
|
||||
|
||||
pub mod scoring;
|
||||
|
||||
pub use self::error::{Error, ErrorKind};
|
||||
pub use self::listener::{Listener, NoopListener};
|
||||
pub use self::options::Options;
|
||||
pub use self::pool::{Pool, PendingIterator, UnorderedIterator, Transaction};
|
||||
pub use self::ready::{Ready, Readiness};
|
||||
pub use self::scoring::Scoring;
|
||||
pub use self::status::{LightStatus, Status};
|
||||
pub use self::verifier::Verifier;
|
||||
|
||||
use std::fmt;
|
||||
use std::hash::Hash;
|
||||
|
||||
/// Already verified transaction that can be safely queued.
|
||||
pub trait VerifiedTransaction: fmt::Debug {
|
||||
/// Transaction hash type.
|
||||
type Hash: fmt::Debug + fmt::LowerHex + Eq + Clone + Hash;
|
||||
|
||||
/// Transaction sender type.
|
||||
type Sender: fmt::Debug + Eq + Clone + Hash + Send;
|
||||
|
||||
/// Transaction hash
|
||||
fn hash(&self) -> &Self::Hash;
|
||||
|
||||
/// Memory usage
|
||||
fn mem_usage(&self) -> usize;
|
||||
|
||||
/// Transaction sender
|
||||
fn sender(&self) -> &Self::Sender;
|
||||
}
|
@ -1,85 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::Arc;
|
||||
use error::ErrorKind;
|
||||
|
||||
/// Transaction pool listener.
|
||||
///
|
||||
/// Listener is being notified about status of every transaction in the pool.
|
||||
pub trait Listener<T> {
|
||||
/// The transaction has been successfuly added to the pool.
|
||||
/// If second argument is `Some` the transaction has took place of some other transaction
|
||||
/// which was already in pool.
|
||||
/// NOTE: You won't be notified about drop of `old` transaction separately.
|
||||
fn added(&mut self, _tx: &Arc<T>, _old: Option<&Arc<T>>) {}
|
||||
|
||||
/// The transaction was rejected from the pool.
|
||||
/// It means that it was too cheap to replace any transaction already in the pool.
|
||||
fn rejected(&mut self, _tx: &Arc<T>, _reason: &ErrorKind) {}
|
||||
|
||||
/// The transaction was pushed out from the pool because of the limit.
|
||||
fn dropped(&mut self, _tx: &Arc<T>, _by: Option<&T>) {}
|
||||
|
||||
/// The transaction was marked as invalid by executor.
|
||||
fn invalid(&mut self, _tx: &Arc<T>) {}
|
||||
|
||||
/// The transaction has been canceled.
|
||||
fn canceled(&mut self, _tx: &Arc<T>) {}
|
||||
|
||||
/// The transaction has been culled from the pool.
|
||||
fn culled(&mut self, _tx: &Arc<T>) {}
|
||||
}
|
||||
|
||||
/// A no-op implementation of `Listener`.
|
||||
#[derive(Debug)]
|
||||
pub struct NoopListener;
|
||||
impl<T> Listener<T> for NoopListener {}
|
||||
|
||||
impl<T, A, B> Listener<T> for (A, B) where
|
||||
A: Listener<T>,
|
||||
B: Listener<T>,
|
||||
{
|
||||
fn added(&mut self, tx: &Arc<T>, old: Option<&Arc<T>>) {
|
||||
self.0.added(tx, old);
|
||||
self.1.added(tx, old);
|
||||
}
|
||||
|
||||
fn rejected(&mut self, tx: &Arc<T>, reason: &ErrorKind) {
|
||||
self.0.rejected(tx, reason);
|
||||
self.1.rejected(tx, reason);
|
||||
}
|
||||
|
||||
fn dropped(&mut self, tx: &Arc<T>, by: Option<&T>) {
|
||||
self.0.dropped(tx, by);
|
||||
self.1.dropped(tx, by);
|
||||
}
|
||||
|
||||
fn invalid(&mut self, tx: &Arc<T>) {
|
||||
self.0.invalid(tx);
|
||||
self.1.invalid(tx);
|
||||
}
|
||||
|
||||
fn canceled(&mut self, tx: &Arc<T>) {
|
||||
self.0.canceled(tx);
|
||||
self.1.canceled(tx);
|
||||
}
|
||||
|
||||
fn culled(&mut self, tx: &Arc<T>) {
|
||||
self.0.culled(tx);
|
||||
self.1.culled(tx);
|
||||
}
|
||||
}
|
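// Example (sketch): a hypothetical listener that only counts insertions. Because
// `Listener` is also implemented for pairs (see above), it can be combined with any
// other listener as `(CountingListener::default(), other_listener)`.
#[derive(Debug, Default)]
pub struct CountingListener {
	pub added: usize,
}

impl<T> Listener<T> for CountingListener {
	fn added(&mut self, _tx: &Arc<T>, _old: Option<&Arc<T>>) {
		self.added += 1;
	}
}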
@ -1,36 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Transaction Pool options.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct Options {
|
||||
/// Maximal number of transactions in the pool.
|
||||
pub max_count: usize,
|
||||
/// Maximal number of transactions from single sender.
|
||||
pub max_per_sender: usize,
|
||||
/// Maximal memory usage.
|
||||
pub max_mem_usage: usize,
|
||||
}
|
||||
|
||||
impl Default for Options {
|
||||
fn default() -> Self {
|
||||
Options {
|
||||
max_count: 1024,
|
||||
max_per_sender: 16,
|
||||
max_mem_usage: 8 * 1024 * 1024,
|
||||
}
|
||||
}
|
||||
}
|
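// Example (sketch): deriving a custom configuration from the defaults with struct-update
// syntax; the particular numbers are arbitrary and purely illustrative.
pub fn small_pool_options() -> Options {
	Options {
		max_count: 128,
		max_per_sender: 4,
		..Options::default()
	}
}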
@ -1,616 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::slice;
|
||||
use std::collections::{hash_map, HashMap, BTreeSet};
|
||||
|
||||
use error;
|
||||
use listener::{Listener, NoopListener};
|
||||
use options::Options;
|
||||
use ready::{Ready, Readiness};
|
||||
use scoring::{self, Scoring, ScoreWithRef};
|
||||
use status::{LightStatus, Status};
|
||||
use transactions::{AddResult, Transactions};
|
||||
|
||||
use {VerifiedTransaction};
|
||||
|
||||
/// Internal representation of transaction.
|
||||
///
|
||||
/// Includes a unique insertion id that can be used for scoring explicitly,
|
||||
/// but internally is used to resolve conflicts in case of equal scoring
|
||||
/// (newer transactions are preferred).
|
||||
#[derive(Debug)]
|
||||
pub struct Transaction<T> {
|
||||
/// Sequential id of the transaction
|
||||
pub insertion_id: u64,
|
||||
/// Shared transaction
|
||||
pub transaction: Arc<T>,
|
||||
}
|
||||
|
||||
impl<T> Clone for Transaction<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Transaction {
|
||||
insertion_id: self.insertion_id,
|
||||
transaction: self.transaction.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ::std::ops::Deref for Transaction<T> {
|
||||
type Target = Arc<T>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.transaction
|
||||
}
|
||||
}
|
||||
|
||||
/// A transaction pool.
|
||||
#[derive(Debug)]
|
||||
pub struct Pool<T: VerifiedTransaction, S: Scoring<T>, L = NoopListener> {
|
||||
listener: L,
|
||||
scoring: S,
|
||||
options: Options,
|
||||
mem_usage: usize,
|
||||
|
||||
transactions: HashMap<T::Sender, Transactions<T, S>>,
|
||||
by_hash: HashMap<T::Hash, Transaction<T>>,
|
||||
|
||||
best_transactions: BTreeSet<ScoreWithRef<T, S::Score>>,
|
||||
worst_transactions: BTreeSet<ScoreWithRef<T, S::Score>>,
|
||||
|
||||
insertion_id: u64,
|
||||
}
|
||||
|
||||
impl<T: VerifiedTransaction, S: Scoring<T> + Default> Default for Pool<T, S> {
|
||||
fn default() -> Self {
|
||||
Self::with_scoring(S::default(), Options::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: VerifiedTransaction, S: Scoring<T> + Default> Pool<T, S> {
|
||||
/// Creates a new `Pool` with given options
|
||||
/// and default `Scoring` and `Listener`.
|
||||
pub fn with_options(options: Options) -> Self {
|
||||
Self::with_scoring(S::default(), options)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: VerifiedTransaction, S: Scoring<T>> Pool<T, S> {
|
||||
/// Creates a new `Pool` with given `Scoring` and options.
|
||||
pub fn with_scoring(scoring: S, options: Options) -> Self {
|
||||
Self::new(NoopListener, scoring, options)
|
||||
}
|
||||
}
|
||||
|
||||
const INITIAL_NUMBER_OF_SENDERS: usize = 16;
|
||||
|
||||
impl<T, S, L> Pool<T, S, L> where
|
||||
T: VerifiedTransaction,
|
||||
S: Scoring<T>,
|
||||
L: Listener<T>,
|
||||
{
|
||||
/// Creates new `Pool` with given `Scoring`, `Listener` and options.
|
||||
pub fn new(listener: L, scoring: S, options: Options) -> Self {
|
||||
let transactions = HashMap::with_capacity(INITIAL_NUMBER_OF_SENDERS);
|
||||
let by_hash = HashMap::with_capacity(options.max_count / 16);
|
||||
|
||||
Pool {
|
||||
listener,
|
||||
scoring,
|
||||
options,
|
||||
mem_usage: 0,
|
||||
transactions,
|
||||
by_hash,
|
||||
best_transactions: Default::default(),
|
||||
worst_transactions: Default::default(),
|
||||
insertion_id: 0,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/// Attempts to import a new transaction into the pool; returns an `Arc<T>` or an `Error`.
|
||||
///
|
||||
/// NOTE: Since `Ready`ness is separate from the pool, it's possible to import stalled transactions.
|
||||
/// It's the caller's responsibility to make sure that's not the case.
|
||||
///
|
||||
/// NOTE: The transaction may push out some other transactions from the pool
|
||||
/// either because of limits (see `Options`) or because `Scoring` decides that the transaction
|
||||
/// replaces an existing transaction from that sender.
|
||||
/// If any limit is reached, the transaction with the lowest `Score` is evicted to make room.
|
||||
///
|
||||
/// The `Listener` will be informed on any drops or rejections.
|
||||
pub fn import(&mut self, transaction: T) -> error::Result<Arc<T>> {
|
||||
let mem_usage = transaction.mem_usage();
|
||||
|
||||
ensure!(!self.by_hash.contains_key(transaction.hash()), error::ErrorKind::AlreadyImported(format!("{:?}", transaction.hash())));
|
||||
|
||||
self.insertion_id += 1;
|
||||
let transaction = Transaction {
|
||||
insertion_id: self.insertion_id,
|
||||
transaction: Arc::new(transaction),
|
||||
};
|
||||
|
||||
// TODO [ToDr] Most likely move this after the transaction is inserted.
|
||||
// Avoid using should_replace, but rather use scoring for that.
|
||||
{
|
||||
let remove_worst = |s: &mut Self, transaction| {
|
||||
match s.remove_worst(transaction) {
|
||||
Err(err) => {
|
||||
s.listener.rejected(transaction, err.kind());
|
||||
Err(err)
|
||||
},
|
||||
Ok(None) => Ok(false),
|
||||
Ok(Some(removed)) => {
|
||||
s.listener.dropped(&removed, Some(transaction));
|
||||
s.finalize_remove(removed.hash());
|
||||
Ok(true)
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
while self.by_hash.len() + 1 > self.options.max_count {
|
||||
trace!("Count limit reached: {} > {}", self.by_hash.len() + 1, self.options.max_count);
|
||||
if !remove_worst(self, &transaction)? {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
while self.mem_usage + mem_usage > self.options.max_mem_usage {
|
||||
trace!("Mem limit reached: {} > {}", self.mem_usage + mem_usage, self.options.max_mem_usage);
|
||||
if !remove_worst(self, &transaction)? {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let (result, prev_state, current_state) = {
|
||||
let transactions = self.transactions.entry(transaction.sender().clone()).or_insert_with(Transactions::default);
|
||||
// get worst and best transactions for comparison
|
||||
let prev = transactions.worst_and_best();
|
||||
let result = transactions.add(transaction, &self.scoring, self.options.max_per_sender);
|
||||
let current = transactions.worst_and_best();
|
||||
(result, prev, current)
|
||||
};
|
||||
|
||||
// update best and worst transactions from this sender (if required)
|
||||
self.update_senders_worst_and_best(prev_state, current_state);
|
||||
|
||||
match result {
|
||||
AddResult::Ok(tx) => {
|
||||
self.listener.added(&tx, None);
|
||||
self.finalize_insert(&tx, None);
|
||||
Ok(tx.transaction)
|
||||
},
|
||||
AddResult::PushedOut { new, old } |
|
||||
AddResult::Replaced { new, old } => {
|
||||
self.listener.added(&new, Some(&old));
|
||||
self.finalize_insert(&new, Some(&old));
|
||||
Ok(new.transaction)
|
||||
},
|
||||
AddResult::TooCheap { new, old } => {
|
||||
let error = error::ErrorKind::TooCheapToReplace(format!("{:x}", old.hash()), format!("{:x}", new.hash()));
|
||||
self.listener.rejected(&new, &error);
|
||||
bail!(error)
|
||||
},
|
||||
AddResult::TooCheapToEnter(new, score) => {
|
||||
let error = error::ErrorKind::TooCheapToEnter(format!("{:x}", new.hash()), format!("{:?}", score));
|
||||
self.listener.rejected(&new, &error);
|
||||
bail!(error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates state of the pool statistics if the transaction was added to a set.
|
||||
fn finalize_insert(&mut self, new: &Transaction<T>, old: Option<&Transaction<T>>) {
|
||||
self.mem_usage += new.mem_usage();
|
||||
self.by_hash.insert(new.hash().clone(), new.clone());
|
||||
|
||||
if let Some(old) = old {
|
||||
self.finalize_remove(old.hash());
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates the pool statistics if transaction was removed.
|
||||
fn finalize_remove(&mut self, hash: &T::Hash) -> Option<Arc<T>> {
|
||||
self.by_hash.remove(hash).map(|old| {
|
||||
self.mem_usage -= old.transaction.mem_usage();
|
||||
old.transaction
|
||||
})
|
||||
}
|
||||
|
||||
/// Updates best and worst transactions from a sender.
|
||||
fn update_senders_worst_and_best(
|
||||
&mut self,
|
||||
previous: Option<((S::Score, Transaction<T>), (S::Score, Transaction<T>))>,
|
||||
current: Option<((S::Score, Transaction<T>), (S::Score, Transaction<T>))>,
|
||||
) {
|
||||
let worst_collection = &mut self.worst_transactions;
|
||||
let best_collection = &mut self.best_transactions;
|
||||
|
||||
let is_same = |a: &(S::Score, Transaction<T>), b: &(S::Score, Transaction<T>)| {
|
||||
a.0 == b.0 && a.1.hash() == b.1.hash()
|
||||
};
|
||||
|
||||
let update = |collection: &mut BTreeSet<_>, (score, tx), remove| if remove {
|
||||
collection.remove(&ScoreWithRef::new(score, tx));
|
||||
} else {
|
||||
collection.insert(ScoreWithRef::new(score, tx));
|
||||
};
|
||||
|
||||
match (previous, current) {
|
||||
(None, Some((worst, best))) => {
|
||||
update(worst_collection, worst, false);
|
||||
update(best_collection, best, false);
|
||||
},
|
||||
(Some((worst, best)), None) => {
|
||||
// All transactions from that sender have been removed.
|
||||
// We can clear a hashmap entry.
|
||||
self.transactions.remove(worst.1.sender());
|
||||
update(worst_collection, worst, true);
|
||||
update(best_collection, best, true);
|
||||
},
|
||||
(Some((w1, b1)), Some((w2, b2))) => {
|
||||
if !is_same(&w1, &w2) {
|
||||
update(worst_collection, w1, true);
|
||||
update(worst_collection, w2, false);
|
||||
}
|
||||
if !is_same(&b1, &b2) {
|
||||
update(best_collection, b1, true);
|
||||
update(best_collection, b2, false);
|
||||
}
|
||||
},
|
||||
(None, None) => {},
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to remove the worst transaction from the pool if it's worse than the given one.
|
||||
///
|
||||
/// Returns `Ok(None)` if we can't decide whether the transaction should replace the worst transaction.
|
||||
/// In such a case the transaction is accepted even though it is going to exceed the limit.
|
||||
fn remove_worst(&mut self, transaction: &Transaction<T>) -> error::Result<Option<Transaction<T>>> {
|
||||
let to_remove = match self.worst_transactions.iter().next_back() {
|
||||
// No elements to remove, yet the pool is still full?
|
||||
None => {
|
||||
warn!("The pool is full but there are no transactions to remove.");
|
||||
return Err(error::ErrorKind::TooCheapToEnter(format!("{:?}", transaction.hash()), "unknown".into()).into());
|
||||
},
|
||||
Some(old) => match self.scoring.should_replace(&old.transaction, transaction) {
|
||||
// We can't decide which of them should be removed, so accept both.
|
||||
scoring::Choice::InsertNew => None,
|
||||
// New transaction is better than the worst one so we can replace it.
|
||||
scoring::Choice::ReplaceOld => Some(old.clone()),
|
||||
// otherwise fail
|
||||
scoring::Choice::RejectNew => {
|
||||
return Err(error::ErrorKind::TooCheapToEnter(format!("{:?}", transaction.hash()), format!("{:?}", old.score)).into())
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
if let Some(to_remove) = to_remove {
|
||||
// Remove from transaction set
|
||||
self.remove_from_set(to_remove.transaction.sender(), |set, scoring| {
|
||||
set.remove(&to_remove.transaction, scoring)
|
||||
});
|
||||
|
||||
Ok(Some(to_remove.transaction))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes transaction from sender's transaction `HashMap`.
|
||||
fn remove_from_set<R, F: FnOnce(&mut Transactions<T, S>, &S) -> R>(&mut self, sender: &T::Sender, f: F) -> Option<R> {
|
||||
let (prev, next, result) = if let Some(set) = self.transactions.get_mut(sender) {
|
||||
let prev = set.worst_and_best();
|
||||
let result = f(set, &self.scoring);
|
||||
(prev, set.worst_and_best(), result)
|
||||
} else {
|
||||
return None;
|
||||
};
|
||||
|
||||
self.update_senders_worst_and_best(prev, next);
|
||||
Some(result)
|
||||
}
|
||||
|
||||
/// Clears pool from all transactions.
|
||||
/// This causes a listener notification that all transactions were dropped.
|
||||
/// NOTE: the drop-notification order will be arbitrary.
|
||||
pub fn clear(&mut self) {
|
||||
self.mem_usage = 0;
|
||||
self.transactions.clear();
|
||||
self.best_transactions.clear();
|
||||
self.worst_transactions.clear();
|
||||
|
||||
for (_hash, tx) in self.by_hash.drain() {
|
||||
self.listener.dropped(&tx.transaction, None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes single transaction from the pool.
|
||||
/// Depending on the `is_invalid` flag the listener
|
||||
/// will either get a `canceled` or `invalid` notification.
|
||||
pub fn remove(&mut self, hash: &T::Hash, is_invalid: bool) -> Option<Arc<T>> {
|
||||
if let Some(tx) = self.finalize_remove(hash) {
|
||||
self.remove_from_set(tx.sender(), |set, scoring| {
|
||||
set.remove(&tx, scoring)
|
||||
});
|
||||
if is_invalid {
|
||||
self.listener.invalid(&tx);
|
||||
} else {
|
||||
self.listener.canceled(&tx);
|
||||
}
|
||||
Some(tx)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes all stalled transactions from given sender.
|
||||
fn remove_stalled<R: Ready<T>>(&mut self, sender: &T::Sender, ready: &mut R) -> usize {
|
||||
let removed_from_set = self.remove_from_set(sender, |transactions, scoring| {
|
||||
transactions.cull(ready, scoring)
|
||||
});
|
||||
|
||||
match removed_from_set {
|
||||
Some(removed) => {
|
||||
let len = removed.len();
|
||||
for tx in removed {
|
||||
self.finalize_remove(tx.hash());
|
||||
self.listener.culled(&tx);
|
||||
}
|
||||
len
|
||||
},
|
||||
None => 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes all stalled transactions from given sender list (or from all senders).
|
||||
pub fn cull<R: Ready<T>>(&mut self, senders: Option<&[T::Sender]>, mut ready: R) -> usize {
|
||||
let mut removed = 0;
|
||||
match senders {
|
||||
Some(senders) => {
|
||||
for sender in senders {
|
||||
removed += self.remove_stalled(sender, &mut ready);
|
||||
}
|
||||
},
|
||||
None => {
|
||||
let senders = self.transactions.keys().cloned().collect::<Vec<_>>();
|
||||
for sender in senders {
|
||||
removed += self.remove_stalled(&sender, &mut ready);
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
removed
|
||||
}
|
||||
|
||||
/// Returns a transaction if it's part of the pool or `None` otherwise.
|
||||
pub fn find(&self, hash: &T::Hash) -> Option<Arc<T>> {
|
||||
self.by_hash.get(hash).map(|t| t.transaction.clone())
|
||||
}
|
||||
|
||||
/// Returns worst transaction in the queue (if any).
|
||||
pub fn worst_transaction(&self) -> Option<Arc<T>> {
|
||||
self.worst_transactions.iter().next_back().map(|x| x.transaction.transaction.clone())
|
||||
}
|
||||
|
||||
/// Returns true if the pool is at its capacity.
|
||||
pub fn is_full(&self) -> bool {
|
||||
self.by_hash.len() >= self.options.max_count
|
||||
|| self.mem_usage >= self.options.max_mem_usage
|
||||
}
|
||||
|
||||
/// Returns senders ordered by priority of their transactions.
|
||||
pub fn senders(&self) -> impl Iterator<Item=&T::Sender> {
|
||||
self.best_transactions.iter().map(|tx| tx.transaction.sender())
|
||||
}
|
||||
|
||||
/// Returns an iterator of pending (ready) transactions.
|
||||
pub fn pending<R: Ready<T>>(&self, ready: R) -> PendingIterator<T, R, S, L> {
|
||||
PendingIterator {
|
||||
ready,
|
||||
best_transactions: self.best_transactions.clone(),
|
||||
pool: self,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns pending (ready) transactions from given sender.
|
||||
pub fn pending_from_sender<R: Ready<T>>(&self, ready: R, sender: &T::Sender) -> PendingIterator<T, R, S, L> {
|
||||
let best_transactions = self.transactions.get(sender)
|
||||
.and_then(|transactions| transactions.worst_and_best())
|
||||
.map(|(_, best)| ScoreWithRef::new(best.0, best.1))
|
||||
.map(|s| {
|
||||
let mut set = BTreeSet::new();
|
||||
set.insert(s);
|
||||
set
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
PendingIterator {
|
||||
ready,
|
||||
best_transactions,
|
||||
pool: self,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns unprioritized list of ready transactions.
|
||||
pub fn unordered_pending<R: Ready<T>>(&self, ready: R) -> UnorderedIterator<T, R, S> {
|
||||
UnorderedIterator {
|
||||
ready,
|
||||
senders: self.transactions.iter(),
|
||||
transactions: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Update score of transactions of a particular sender.
|
||||
pub fn update_scores(&mut self, sender: &T::Sender, event: S::Event) {
|
||||
let res = if let Some(set) = self.transactions.get_mut(sender) {
|
||||
let prev = set.worst_and_best();
|
||||
set.update_scores(&self.scoring, event);
|
||||
let current = set.worst_and_best();
|
||||
Some((prev, current))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if let Some((prev, current)) = res {
|
||||
self.update_senders_worst_and_best(prev, current);
|
||||
}
|
||||
}
|
||||
|
||||
/// Computes the full status of the pool (including readiness).
|
||||
pub fn status<R: Ready<T>>(&self, mut ready: R) -> Status {
|
||||
let mut status = Status::default();
|
||||
|
||||
for (_sender, transactions) in &self.transactions {
|
||||
let len = transactions.len();
|
||||
for (idx, tx) in transactions.iter().enumerate() {
|
||||
match ready.is_ready(tx) {
|
||||
Readiness::Stale => status.stalled += 1,
|
||||
Readiness::Ready => status.pending += 1,
|
||||
Readiness::Future => {
|
||||
status.future += len - idx;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
status
|
||||
}
|
||||
|
||||
/// Returns light status of the pool.
|
||||
pub fn light_status(&self) -> LightStatus {
|
||||
LightStatus {
|
||||
mem_usage: self.mem_usage,
|
||||
transaction_count: self.by_hash.len(),
|
||||
senders: self.transactions.len(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns current pool options.
|
||||
pub fn options(&self) -> Options {
|
||||
self.options.clone()
|
||||
}
|
||||
|
||||
/// Borrows listener instance.
|
||||
pub fn listener(&self) -> &L {
|
||||
&self.listener
|
||||
}
|
||||
|
||||
/// Borrows scoring instance.
|
||||
pub fn scoring(&self) -> &S {
|
||||
&self.scoring
|
||||
}
|
||||
|
||||
/// Borrows listener mutably.
|
||||
pub fn listener_mut(&mut self) -> &mut L {
|
||||
&mut self.listener
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over all pending (ready) transactions in an unordered fashion.
|
||||
///
|
||||
/// NOTE: The current implementation will iterate over all transactions from a particular sender
|
||||
/// ordered by nonce, but that might change in the future.
|
||||
///
|
||||
/// NOTE: the transactions are not removed from the queue.
|
||||
/// You might remove them later by calling `cull`.
|
||||
pub struct UnorderedIterator<'a, T, R, S> where
|
||||
T: VerifiedTransaction + 'a,
|
||||
S: Scoring<T> + 'a,
|
||||
{
|
||||
ready: R,
|
||||
senders: hash_map::Iter<'a, T::Sender, Transactions<T, S>>,
|
||||
transactions: Option<slice::Iter<'a, Transaction<T>>>,
|
||||
}
|
||||
|
||||
impl<'a, T, R, S> Iterator for UnorderedIterator<'a, T, R, S> where
|
||||
T: VerifiedTransaction,
|
||||
R: Ready<T>,
|
||||
S: Scoring<T>,
|
||||
{
|
||||
type Item = Arc<T>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
loop {
|
||||
if let Some(transactions) = self.transactions.as_mut() {
|
||||
if let Some(tx) = transactions.next() {
|
||||
match self.ready.is_ready(&tx) {
|
||||
Readiness::Ready => {
|
||||
return Some(tx.transaction.clone());
|
||||
},
|
||||
state => trace!("[{:?}] Ignoring {:?} transaction.", tx.hash(), state),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise fallback and try next sender
|
||||
let next_sender = self.senders.next()?;
|
||||
self.transactions = Some(next_sender.1.iter());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// An iterator over all pending (ready) transactions.
|
||||
/// NOTE: the transactions are not removed from the queue.
|
||||
/// You might remove them later by calling `cull`.
|
||||
pub struct PendingIterator<'a, T, R, S, L> where
|
||||
T: VerifiedTransaction + 'a,
|
||||
S: Scoring<T> + 'a,
|
||||
L: 'a,
|
||||
{
|
||||
ready: R,
|
||||
best_transactions: BTreeSet<ScoreWithRef<T, S::Score>>,
|
||||
pool: &'a Pool<T, S, L>,
|
||||
}
|
||||
|
||||
impl<'a, T, R, S, L> Iterator for PendingIterator<'a, T, R, S, L> where
|
||||
T: VerifiedTransaction,
|
||||
R: Ready<T>,
|
||||
S: Scoring<T>,
|
||||
{
|
||||
type Item = Arc<T>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
while !self.best_transactions.is_empty() {
|
||||
let best = {
|
||||
let best = self.best_transactions.iter().next().expect("current_best is not empty; qed").clone();
|
||||
self.best_transactions.take(&best).expect("Just taken from iterator; qed")
|
||||
};
|
||||
|
||||
match self.ready.is_ready(&best.transaction) {
|
||||
Readiness::Ready => {
|
||||
// retrieve next one from that sender.
|
||||
let next = self.pool.transactions
|
||||
.get(best.transaction.sender())
|
||||
.and_then(|s| s.find_next(&best.transaction, &self.pool.scoring));
|
||||
if let Some((score, tx)) = next {
|
||||
self.best_transactions.insert(ScoreWithRef::new(score, tx));
|
||||
}
|
||||
|
||||
return Some(best.transaction.transaction)
|
||||
},
|
||||
state => trace!("[{:?}] Ignoring {:?} transaction.", best.transaction.hash(), state),
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
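// Example (sketch): typical pool usage, written generically so it works for any
// `VerifiedTransaction`/`Scoring`/`Ready` combination. Purely illustrative and not part
// of the original API.
pub fn example_import_and_collect<T, S, R>(scoring: S, ready: R, tx: T) -> error::Result<Vec<Arc<T>>>
where
	T: VerifiedTransaction,
	S: Scoring<T>,
	R: Ready<T>,
{
	let mut pool = Pool::with_scoring(scoring, Options::default());
	// `import` hands back the shared `Arc<T>` (or an error such as `AlreadyImported`).
	let _imported = pool.import(tx)?;
	// `pending` only iterates transactions that are ready right now; nothing is removed.
	// Stalled transactions are removed separately via `cull`.
	Ok(pool.pending(ready).collect())
}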
@ -1,54 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Transaction readiness.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Readiness {
|
||||
/// The transaction is stale (and should/will be removed from the pool).
|
||||
Stale,
|
||||
/// The transaction is ready to be included in pending set.
|
||||
Ready,
|
||||
/// The transaction is not yet ready.
|
||||
Future,
|
||||
}
|
||||
|
||||
/// A readiness indicator.
|
||||
pub trait Ready<T> {
|
||||
/// Returns true if the transaction is ready to be included in a pending block,
|
||||
/// given all previous transactions that were ready are already included.
|
||||
///
|
||||
/// NOTE: readiness of transactions will be checked according to `Score` ordering,
|
||||
/// the implementation should maintain a state of already checked transactions.
|
||||
fn is_ready(&mut self, tx: &T) -> Readiness;
|
||||
}
|
||||
|
||||
impl<T, F> Ready<T> for F where F: FnMut(&T) -> Readiness {
|
||||
fn is_ready(&mut self, tx: &T) -> Readiness {
|
||||
(*self)(tx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A, B> Ready<T> for (A, B) where
|
||||
A: Ready<T>,
|
||||
B: Ready<T>,
|
||||
{
|
||||
fn is_ready(&mut self, tx: &T) -> Readiness {
|
||||
match self.0.is_ready(tx) {
|
||||
Readiness::Ready => self.1.is_ready(tx),
|
||||
r => r,
|
||||
}
|
||||
}
|
||||
}
|
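// Example (sketch): a trivial readiness policy that reports everything as ready. Purely
// illustrative. Since `Ready` is also implemented for pairs (see above), it composes with
// stricter checks, e.g. `(AlwaysReady, nonce_check)`, where the second check only runs for
// transactions the first one reported as `Ready`.
#[derive(Debug, Default)]
pub struct AlwaysReady;

impl<T> Ready<T> for AlwaysReady {
	fn is_ready(&mut self, _tx: &T) -> Readiness {
		Readiness::Ready
	}
}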
@ -1,157 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! A transactions ordering abstraction.
|
||||
|
||||
use std::{cmp, fmt};
|
||||
use pool::Transaction;
|
||||
|
||||
/// Represents a decision what to do with
|
||||
/// a new transaction that tries to enter the pool.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Choice {
|
||||
/// New transaction should be rejected
|
||||
/// (i.e. the old transaction that occupies the same spot
|
||||
/// is better).
|
||||
RejectNew,
|
||||
/// The old transaction should be dropped
|
||||
/// in favour of the new one.
|
||||
ReplaceOld,
|
||||
/// The new transaction should be inserted
|
||||
/// and both (old and new) should stay in the pool.
|
||||
InsertNew,
|
||||
}
|
||||
|
||||
/// Describes a reason why the `Score` of transactions
|
||||
/// should be updated.
|
||||
/// The `Scoring` implementations can use this information
|
||||
/// to update the `Score` table more efficiently.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Change<T = ()> {
|
||||
/// A new transaction has been inserted at the given index.
|
||||
/// The Score at that index is initialized with default value
|
||||
/// and needs to be filled in.
|
||||
InsertedAt(usize),
|
||||
/// The transaction has been removed at the given index and other transactions
|
||||
/// shifted to its place.
|
||||
/// The scores were removed and shifted as well.
|
||||
/// For simple scoring algorithms no action is required here.
|
||||
RemovedAt(usize),
|
||||
/// The transaction at the given index has replaced a previous transaction.
|
||||
/// The score at that index needs to be updated (it contains the value from the previous transaction).
|
||||
ReplacedAt(usize),
|
||||
/// The given number of stalled transactions has been culled from the beginning.
|
||||
/// The scores have been removed from the beginning as well.
|
||||
/// For simple scoring algorithms no action is required here.
|
||||
Culled(usize),
|
||||
/// Custom event to update the score triggered outside of the pool.
|
||||
/// Handling this event is up to scoring implementation.
|
||||
Event(T),
|
||||
}
|
||||
|
||||
/// A transaction ordering.
|
||||
///
|
||||
/// The implementation should decide on order of transactions in the pool.
|
||||
/// Each transaction should also get assigned a `Score` which is used to later
|
||||
/// prioritize transactions in the pending set.
|
||||
///
|
||||
/// Implementation notes:
|
||||
/// - Returned `Score`s should match ordering of `compare` method.
|
||||
/// - `compare` will be called only within a context of transactions from the same sender.
|
||||
/// - `choose` may be called even if `compare` returns `Ordering::Equal`
|
||||
/// - `should_replace` is used to decide if new transaction should push out an old transaction already in the queue.
|
||||
/// - `Score`s and `compare` should align with `Ready` implementation.
|
||||
///
|
||||
/// Example: Natural ordering of Ethereum transactions.
|
||||
/// - `compare`: compares transaction `nonce`
|
||||
/// - `choose`: compares transactions `gasPrice` (decides if old transaction should be replaced)
|
||||
/// - `update_scores`: score defined as `gasPrice` if `n==0` and `max(scores[n-1], gasPrice)` if `n>0`
|
||||
/// - `should_replace`: compares `gasPrice` (decides if transaction from a different sender is more valuable)
|
||||
///
|
||||
pub trait Scoring<T>: fmt::Debug {
|
||||
/// A score of a transaction.
|
||||
type Score: cmp::Ord + Clone + Default + fmt::Debug + Send;
|
||||
/// Custom scoring update event type.
|
||||
type Event: fmt::Debug;
|
||||
|
||||
/// Decides on ordering of `T`s from a particular sender.
|
||||
fn compare(&self, old: &T, other: &T) -> cmp::Ordering;
|
||||
|
||||
/// Decides how to deal with two transactions from a sender that seem to occupy the same slot in the queue.
|
||||
fn choose(&self, old: &T, new: &T) -> Choice;
|
||||
|
||||
/// Updates the transaction scores given a list of transactions and a change to previous scoring.
|
||||
/// NOTE: you can safely assume that both slices have the same length.
|
||||
/// (i.e. score at index `i` represents transaction at the same index)
|
||||
fn update_scores(&self, txs: &[Transaction<T>], scores: &mut [Self::Score], change: Change<Self::Event>);
|
||||
|
||||
/// Decides if `new` should push out `old` transaction from the pool.
|
||||
///
|
||||
/// NOTE: returning `InsertNew` here can lead to some transactions being accepted above pool limits.
|
||||
fn should_replace(&self, old: &T, new: &T) -> Choice;
|
||||
|
||||
/// Decides if the transaction should ignore per-sender limit in the pool.
|
||||
///
|
||||
/// If you return `true` for a given transaction, it's going to be accepted even though
|
||||
/// the per-sender limit is exceeded.
|
||||
fn should_ignore_sender_limit(&self, _new: &T) -> bool { false }
|
||||
}
|
||||
|
||||
/// A score with a reference to the transaction.
|
||||
#[derive(Debug)]
|
||||
pub struct ScoreWithRef<T, S> {
|
||||
/// Score
|
||||
pub score: S,
|
||||
/// Shared transaction
|
||||
pub transaction: Transaction<T>,
|
||||
}
|
||||
|
||||
impl<T, S> ScoreWithRef<T, S> {
|
||||
/// Creates a new `ScoreWithRef`
|
||||
pub fn new(score: S, transaction: Transaction<T>) -> Self {
|
||||
ScoreWithRef { score, transaction }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S: Clone> Clone for ScoreWithRef<T, S> {
|
||||
fn clone(&self) -> Self {
|
||||
ScoreWithRef {
|
||||
score: self.score.clone(),
|
||||
transaction: self.transaction.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: cmp::Ord, T> Ord for ScoreWithRef<T, S> {
|
||||
fn cmp(&self, other: &Self) -> cmp::Ordering {
|
||||
other.score.cmp(&self.score)
|
||||
.then(other.transaction.insertion_id.cmp(&self.transaction.insertion_id))
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: cmp::Ord, T> PartialOrd for ScoreWithRef<T, S> {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: cmp::Ord, T> PartialEq for ScoreWithRef<T, S> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.score == other.score && self.transaction.insertion_id == other.transaction.insertion_id
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: cmp::Ord, T> Eq for ScoreWithRef<T, S> {}
|
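// Example (sketch): a gas-price based `Scoring`, following the "natural ordering of
// Ethereum transactions" described above. `ExampleTx` is a hypothetical transaction type
// with `nonce` and `gas_price` fields; a real node would use its verified transaction type.
#[derive(Debug)]
pub struct ExampleTx {
	pub nonce: u64,
	pub gas_price: u64,
}

#[derive(Debug)]
pub struct GasPriceScoring;

impl Scoring<ExampleTx> for GasPriceScoring {
	type Score = u64;
	type Event = ();

	// Within one sender, order by nonce.
	fn compare(&self, old: &ExampleTx, other: &ExampleTx) -> cmp::Ordering {
		old.nonce.cmp(&other.nonce)
	}

	// Same nonce: keep the transaction with the higher gas price.
	fn choose(&self, old: &ExampleTx, new: &ExampleTx) -> Choice {
		if old.nonce != new.nonce {
			Choice::InsertNew
		} else if new.gas_price > old.gas_price {
			Choice::ReplaceOld
		} else {
			Choice::RejectNew
		}
	}

	// The score is simply the gas price; `Change` can be ignored for such a simple scheme.
	fn update_scores(&self, txs: &[Transaction<ExampleTx>], scores: &mut [Self::Score], _change: Change<Self::Event>) {
		for (tx, score) in txs.iter().zip(scores.iter_mut()) {
			*score = tx.gas_price;
		}
	}

	// Across senders, a more expensive transaction may push out a cheaper one.
	fn should_replace(&self, old: &ExampleTx, new: &ExampleTx) -> Choice {
		if new.gas_price > old.gas_price { Choice::ReplaceOld } else { Choice::RejectNew }
	}
}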
@ -1,40 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Light pool status.
|
||||
/// This status is cheap to compute and can be called frequently.
|
||||
#[derive(Default, Debug, Clone, PartialEq, Eq)]
|
||||
pub struct LightStatus {
|
||||
/// Memory usage in bytes.
|
||||
pub mem_usage: usize,
|
||||
/// Total number of transactions in the pool.
|
||||
pub transaction_count: usize,
|
||||
/// Number of unique senders in the pool.
|
||||
pub senders: usize,
|
||||
}
|
||||
|
||||
/// A full queue status.
|
||||
/// Computing this status requires providing a `Ready` implementation.
|
||||
/// NOTE: To compute the status we need to visit each transaction in the pool.
|
||||
#[derive(Default, Debug, Clone, PartialEq, Eq)]
|
||||
pub struct Status {
|
||||
/// Number of stalled transactions.
|
||||
pub stalled: usize,
|
||||
/// Number of pending (ready) transactions.
|
||||
pub pending: usize,
|
||||
/// Number of future (not ready) transactions.
|
||||
pub future: usize,
|
||||
}
|
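// Example (sketch): `light_status` is cheap and can be polled frequently (e.g. for metrics),
// while the full `status` visits every transaction and needs a `Ready` implementation.
// `pool` and `MyReady` are assumed to exist elsewhere:
//
// let light = pool.light_status();
// let full = pool.status(MyReady::default());
// // Every transaction is counted exactly once by the full status:
// assert_eq!(full.stalled + full.pending + full.future, light.transaction_count);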
@ -1,110 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::cmp;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use ethereum_types::{H160 as Sender, U256};
|
||||
use {pool, scoring, Scoring, Ready, Readiness};
|
||||
use super::Transaction;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct DummyScoring {
|
||||
always_insert: bool,
|
||||
}
|
||||
|
||||
impl DummyScoring {
|
||||
pub fn always_insert() -> Self {
|
||||
DummyScoring {
|
||||
always_insert: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Scoring<Transaction> for DummyScoring {
|
||||
type Score = U256;
|
||||
type Event = ();
|
||||
|
||||
fn compare(&self, old: &Transaction, new: &Transaction) -> cmp::Ordering {
|
||||
old.nonce.cmp(&new.nonce)
|
||||
}
|
||||
|
||||
fn choose(&self, old: &Transaction, new: &Transaction) -> scoring::Choice {
|
||||
if old.nonce == new.nonce {
|
||||
if new.gas_price > old.gas_price {
|
||||
scoring::Choice::ReplaceOld
|
||||
} else {
|
||||
scoring::Choice::RejectNew
|
||||
}
|
||||
} else {
|
||||
scoring::Choice::InsertNew
|
||||
}
|
||||
}
|
||||
|
||||
fn update_scores(&self, txs: &[pool::Transaction<Transaction>], scores: &mut [Self::Score], change: scoring::Change) {
|
||||
if let scoring::Change::Event(_) = change {
|
||||
// In case of an event, reset all scores to 0
|
||||
for i in 0..txs.len() {
|
||||
scores[i] = 0.into();
|
||||
}
|
||||
} else {
|
||||
// Set to a gas price otherwise
|
||||
for i in 0..txs.len() {
|
||||
scores[i] = txs[i].gas_price;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn should_replace(&self, old: &Transaction, new: &Transaction) -> scoring::Choice {
|
||||
if self.always_insert {
|
||||
scoring::Choice::InsertNew
|
||||
} else if new.gas_price > old.gas_price {
|
||||
scoring::Choice::ReplaceOld
|
||||
} else {
|
||||
scoring::Choice::RejectNew
|
||||
}
|
||||
}
|
||||
|
||||
fn should_ignore_sender_limit(&self, _new: &Transaction) -> bool {
|
||||
self.always_insert
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct NonceReady(HashMap<Sender, U256>, U256);
|
||||
|
||||
impl NonceReady {
|
||||
pub fn new<T: Into<U256>>(min: T) -> Self {
|
||||
let mut n = NonceReady::default();
|
||||
n.1 = min.into();
|
||||
n
|
||||
}
|
||||
}
|
||||
|
||||
impl Ready<Transaction> for NonceReady {
|
||||
fn is_ready(&mut self, tx: &Transaction) -> Readiness {
|
||||
let min = self.1;
|
||||
let nonce = self.0.entry(tx.sender).or_insert_with(|| min);
|
||||
match tx.nonce.cmp(nonce) {
|
||||
cmp::Ordering::Greater => Readiness::Future,
|
||||
cmp::Ordering::Equal => {
|
||||
*nonce = *nonce + 1.into();
|
||||
Readiness::Ready
|
||||
},
|
||||
cmp::Ordering::Less => Readiness::Stale,
|
||||
}
|
||||
}
|
||||
}
|
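// Example (sketch): how `NonceReady` classifies transactions, starting from a minimum
// nonce of 1. Added purely as an illustration of the helper above.
#[test]
fn nonce_ready_classifies_by_nonce() {
	let b = super::tx_builder::TransactionBuilder::default();
	let mut ready = NonceReady::new(1);

	// Below the expected nonce: stale (already included).
	assert_eq!(ready.is_ready(&b.tx().nonce(0).new()), Readiness::Stale);
	// Exactly the expected nonce: ready, and the expected nonce advances to 2.
	assert_eq!(ready.is_ready(&b.tx().nonce(1).new()), Readiness::Ready);
	// Nonce 3 skips over 2, so it is a future transaction.
	assert_eq!(ready.is_ready(&b.tx().nonce(3).new()), Readiness::Future);
}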
@ -1,748 +0,0 @@
|
||||
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
mod helpers;
|
||||
mod tx_builder;
|
||||
|
||||
use self::helpers::{DummyScoring, NonceReady};
|
||||
use self::tx_builder::TransactionBuilder;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use ethereum_types::{H256, U256, Address};
|
||||
use super::*;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct Transaction {
|
||||
pub hash: H256,
|
||||
pub nonce: U256,
|
||||
pub gas_price: U256,
|
||||
pub gas: U256,
|
||||
pub sender: Address,
|
||||
pub mem_usage: usize,
|
||||
}
|
||||
|
||||
impl VerifiedTransaction for Transaction {
|
||||
type Hash = H256;
|
||||
type Sender = Address;
|
||||
|
||||
fn hash(&self) -> &H256 { &self.hash }
|
||||
fn mem_usage(&self) -> usize { self.mem_usage }
|
||||
fn sender(&self) -> &Address { &self.sender }
|
||||
}
|
||||
|
||||
pub type SharedTransaction = Arc<Transaction>;
|
||||
|
||||
type TestPool = Pool<Transaction, DummyScoring>;
|
||||
|
||||
impl TestPool {
|
||||
pub fn with_limit(max_count: usize) -> Self {
|
||||
Self::with_options(Options {
|
||||
max_count,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_clear_queue() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
assert_eq!(txq.light_status(), LightStatus {
|
||||
mem_usage: 0,
|
||||
transaction_count: 0,
|
||||
senders: 0,
|
||||
});
|
||||
let tx1 = b.tx().nonce(0).new();
|
||||
let tx2 = b.tx().nonce(1).mem_usage(1).new();
|
||||
|
||||
// add
|
||||
txq.import(tx1).unwrap();
|
||||
txq.import(tx2).unwrap();
|
||||
assert_eq!(txq.light_status(), LightStatus {
|
||||
mem_usage: 1,
|
||||
transaction_count: 2,
|
||||
senders: 1,
|
||||
});
|
||||
|
||||
// when
|
||||
txq.clear();
|
||||
|
||||
// then
|
||||
assert_eq!(txq.light_status(), LightStatus {
|
||||
mem_usage: 0,
|
||||
transaction_count: 0,
|
||||
senders: 0,
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_not_allow_same_transaction_twice() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
let tx1 = b.tx().nonce(0).new();
|
||||
let tx2 = b.tx().nonce(0).new();
|
||||
|
||||
// when
|
||||
txq.import(tx1).unwrap();
|
||||
txq.import(tx2).unwrap_err();
|
||||
|
||||
// then
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_replace_transaction() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
let tx1 = b.tx().nonce(0).gas_price(1).new();
|
||||
let tx2 = b.tx().nonce(0).gas_price(2).new();
|
||||
|
||||
// when
|
||||
txq.import(tx1).unwrap();
|
||||
txq.import(tx2).unwrap();
|
||||
|
||||
// then
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_reject_if_above_count() {
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::with_options(Options {
|
||||
max_count: 1,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
// Reject second
|
||||
let tx1 = b.tx().nonce(0).new();
|
||||
let tx2 = b.tx().nonce(1).new();
|
||||
let hash = format!("{:?}", tx2.hash());
|
||||
txq.import(tx1).unwrap();
|
||||
assert_eq!(txq.import(tx2).unwrap_err().kind(), &error::ErrorKind::TooCheapToEnter(hash, "0x0".into()));
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
|
||||
txq.clear();
|
||||
|
||||
// Replace first
|
||||
let tx1 = b.tx().nonce(0).new();
|
||||
let tx2 = b.tx().nonce(0).sender(1).gas_price(2).new();
|
||||
txq.import(tx1).unwrap();
|
||||
txq.import(tx2).unwrap();
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_reject_if_above_mem_usage() {
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::with_options(Options {
|
||||
max_mem_usage: 1,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
// Reject second
|
||||
let tx1 = b.tx().nonce(1).mem_usage(1).new();
|
||||
let tx2 = b.tx().nonce(2).mem_usage(2).new();
|
||||
let hash = format!("{:?}", tx2.hash());
|
||||
txq.import(tx1).unwrap();
|
||||
assert_eq!(txq.import(tx2).unwrap_err().kind(), &error::ErrorKind::TooCheapToEnter(hash, "0x0".into()));
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
|
||||
txq.clear();
|
||||
|
||||
// Replace first
|
||||
let tx1 = b.tx().nonce(1).mem_usage(1).new();
|
||||
let tx2 = b.tx().nonce(1).sender(1).gas_price(2).mem_usage(1).new();
|
||||
txq.import(tx1).unwrap();
|
||||
txq.import(tx2).unwrap();
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_reject_if_above_sender_count() {
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::with_options(Options {
|
||||
max_per_sender: 1,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
// Reject second
|
||||
let tx1 = b.tx().nonce(1).new();
|
||||
let tx2 = b.tx().nonce(2).new();
|
||||
let hash = format!("{:x}", tx2.hash());
|
||||
txq.import(tx1).unwrap();
|
||||
assert_eq!(txq.import(tx2).unwrap_err().kind(), &error::ErrorKind::TooCheapToEnter(hash, "0x0".into()));
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
|
||||
txq.clear();
|
||||
|
||||
// Replace first
|
||||
let tx1 = b.tx().nonce(1).new();
|
||||
let tx2 = b.tx().nonce(2).gas_price(2).new();
|
||||
let hash = format!("{:x}", tx2.hash());
|
||||
txq.import(tx1).unwrap();
|
||||
// This results in an error because we also compare nonces
|
||||
assert_eq!(txq.import(tx2).unwrap_err().kind(), &error::ErrorKind::TooCheapToEnter(hash, "0x0".into()));
|
||||
assert_eq!(txq.light_status().transaction_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_construct_pending() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
|
||||
let tx0 = txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
|
||||
let tx1 = txq.import(b.tx().nonce(1).gas_price(5).new()).unwrap();
|
||||
let tx2 = txq.import(b.tx().nonce(2).new()).unwrap();
|
||||
// this transaction doesn't get into the block despite its high gas price
|
||||
// because of the block gas limit and the simplistic ordering algorithm.
|
||||
txq.import(b.tx().nonce(3).gas_price(4).new()).unwrap();
|
||||
//gap
|
||||
txq.import(b.tx().nonce(5).new()).unwrap();
|
||||
|
||||
let tx5 = txq.import(b.tx().sender(1).nonce(0).new()).unwrap();
|
||||
let tx6 = txq.import(b.tx().sender(1).nonce(1).new()).unwrap();
|
||||
let tx7 = txq.import(b.tx().sender(1).nonce(2).new()).unwrap();
|
||||
let tx8 = txq.import(b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap();
|
||||
// gap
|
||||
txq.import(b.tx().sender(1).nonce(5).new()).unwrap();
|
||||
|
||||
let tx9 = txq.import(b.tx().sender(2).nonce(0).new()).unwrap();
|
||||
assert_eq!(txq.light_status().transaction_count, 11);
|
||||
assert_eq!(txq.status(NonceReady::default()), Status {
|
||||
stalled: 0,
|
||||
pending: 9,
|
||||
future: 2,
|
||||
});
|
||||
assert_eq!(txq.status(NonceReady::new(1)), Status {
|
||||
stalled: 3,
|
||||
pending: 6,
|
||||
future: 2,
|
||||
});
|
||||
|
||||
// when
|
||||
let mut current_gas = U256::zero();
|
||||
let limit = (21_000 * 8).into();
|
||||
let mut pending = txq.pending(NonceReady::default()).take_while(|tx| {
|
||||
let should_take = tx.gas + current_gas <= limit;
|
||||
if should_take {
|
||||
current_gas = current_gas + tx.gas
|
||||
}
|
||||
should_take
|
||||
});
|
||||
|
||||
assert_eq!(pending.next(), Some(tx0));
|
||||
assert_eq!(pending.next(), Some(tx1));
|
||||
assert_eq!(pending.next(), Some(tx9));
|
||||
assert_eq!(pending.next(), Some(tx5));
|
||||
assert_eq!(pending.next(), Some(tx6));
|
||||
assert_eq!(pending.next(), Some(tx7));
|
||||
assert_eq!(pending.next(), Some(tx8));
|
||||
assert_eq!(pending.next(), Some(tx2));
|
||||
assert_eq!(pending.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_return_unordered_iterator() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
|
||||
let tx0 = txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
|
||||
let tx1 = txq.import(b.tx().nonce(1).gas_price(5).new()).unwrap();
|
||||
let tx2 = txq.import(b.tx().nonce(2).new()).unwrap();
|
||||
let tx3 = txq.import(b.tx().nonce(3).gas_price(4).new()).unwrap();
|
||||
//gap
|
||||
txq.import(b.tx().nonce(5).new()).unwrap();
|
||||
|
||||
let tx5 = txq.import(b.tx().sender(1).nonce(0).new()).unwrap();
|
||||
let tx6 = txq.import(b.tx().sender(1).nonce(1).new()).unwrap();
|
||||
let tx7 = txq.import(b.tx().sender(1).nonce(2).new()).unwrap();
|
||||
let tx8 = txq.import(b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap();
|
||||
// gap
|
||||
txq.import(b.tx().sender(1).nonce(5).new()).unwrap();
|
||||
|
||||
let tx9 = txq.import(b.tx().sender(2).nonce(0).new()).unwrap();
|
||||
assert_eq!(txq.light_status().transaction_count, 11);
|
||||
assert_eq!(txq.status(NonceReady::default()), Status {
|
||||
stalled: 0,
|
||||
pending: 9,
|
||||
future: 2,
|
||||
});
|
||||
assert_eq!(txq.status(NonceReady::new(1)), Status {
|
||||
stalled: 3,
|
||||
pending: 6,
|
||||
future: 2,
|
||||
});
|
||||
|
||||
// when
|
||||
let all: Vec<_> = txq.unordered_pending(NonceReady::default()).collect();
|
||||
|
||||
let chain1 = vec![tx0, tx1, tx2, tx3];
|
||||
let chain2 = vec![tx5, tx6, tx7, tx8];
|
||||
let chain3 = vec![tx9];
|
||||
|
||||
assert_eq!(all.len(), chain1.len() + chain2.len() + chain3.len());
|
||||
|
||||
let mut options = vec![
|
||||
vec![chain1.clone(), chain2.clone(), chain3.clone()],
|
||||
vec![chain2.clone(), chain1.clone(), chain3.clone()],
|
||||
vec![chain2.clone(), chain3.clone(), chain1.clone()],
|
||||
vec![chain3.clone(), chain2.clone(), chain1.clone()],
|
||||
vec![chain3.clone(), chain1.clone(), chain2.clone()],
|
||||
vec![chain1.clone(), chain3.clone(), chain2.clone()],
|
||||
].into_iter().map(|mut v| {
|
||||
let mut first = v.pop().unwrap();
|
||||
for mut x in v {
|
||||
first.append(&mut x);
|
||||
}
|
||||
first
|
||||
});
|
||||
|
||||
assert!(options.any(|opt| all == opt));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_update_scoring_correctly() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
|
||||
let tx0 = txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
|
||||
let tx1 = txq.import(b.tx().nonce(1).gas_price(5).new()).unwrap();
|
||||
let tx2 = txq.import(b.tx().nonce(2).new()).unwrap();
|
||||
// this transaction doesn't get into the block despite its high gas price
|
||||
// because of the block gas limit and the simplistic ordering algorithm.
|
||||
txq.import(b.tx().nonce(3).gas_price(4).new()).unwrap();
|
||||
//gap
|
||||
txq.import(b.tx().nonce(5).new()).unwrap();
|
||||
|
||||
let tx5 = txq.import(b.tx().sender(1).nonce(0).new()).unwrap();
|
||||
let tx6 = txq.import(b.tx().sender(1).nonce(1).new()).unwrap();
|
||||
let tx7 = txq.import(b.tx().sender(1).nonce(2).new()).unwrap();
|
||||
let tx8 = txq.import(b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap();
|
||||
// gap
|
||||
txq.import(b.tx().sender(1).nonce(5).new()).unwrap();
|
||||
|
||||
let tx9 = txq.import(b.tx().sender(2).nonce(0).new()).unwrap();
|
||||
assert_eq!(txq.light_status().transaction_count, 11);
|
||||
assert_eq!(txq.status(NonceReady::default()), Status {
|
||||
stalled: 0,
|
||||
pending: 9,
|
||||
future: 2,
|
||||
});
|
||||
assert_eq!(txq.status(NonceReady::new(1)), Status {
|
||||
stalled: 3,
|
||||
pending: 6,
|
||||
future: 2,
|
||||
});
|
||||
|
||||
txq.update_scores(&0.into(), ());
|
||||
|
||||
// when
|
||||
let mut current_gas = U256::zero();
|
||||
let limit = (21_000 * 8).into();
|
||||
let mut pending = txq.pending(NonceReady::default()).take_while(|tx| {
|
||||
let should_take = tx.gas + current_gas <= limit;
|
||||
if should_take {
|
||||
current_gas = current_gas + tx.gas
|
||||
}
|
||||
should_take
|
||||
});
|
||||
|
||||
assert_eq!(pending.next(), Some(tx9));
|
||||
assert_eq!(pending.next(), Some(tx5));
|
||||
assert_eq!(pending.next(), Some(tx6));
|
||||
assert_eq!(pending.next(), Some(tx7));
|
||||
assert_eq!(pending.next(), Some(tx8));
|
||||
// penalized transactions
|
||||
assert_eq!(pending.next(), Some(tx0));
|
||||
assert_eq!(pending.next(), Some(tx1));
|
||||
assert_eq!(pending.next(), Some(tx2));
|
||||
assert_eq!(pending.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_remove_transaction() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
|
||||
let tx1 = txq.import(b.tx().nonce(0).new()).unwrap();
|
||||
let tx2 = txq.import(b.tx().nonce(1).new()).unwrap();
|
||||
txq.import(b.tx().nonce(2).new()).unwrap();
|
||||
assert_eq!(txq.light_status().transaction_count, 3);
|
||||
|
||||
// when
|
||||
assert!(txq.remove(&tx2.hash(), false).is_some());
|
||||
|
||||
// then
|
||||
assert_eq!(txq.light_status().transaction_count, 2);
|
||||
let mut pending = txq.pending(NonceReady::default());
|
||||
assert_eq!(pending.next(), Some(tx1));
|
||||
assert_eq!(pending.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_cull_stalled_transactions() {
|
||||
// given
|
||||
let b = TransactionBuilder::default();
|
||||
let mut txq = TestPool::default();
|
||||
|
||||
txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
|
||||
txq.import(b.tx().nonce(1).new()).unwrap();
|
||||
txq.import(b.tx().nonce(3).new()).unwrap();
|
||||
|
||||
txq.import(b.tx().sender(1).nonce(0).new()).unwrap();
|
||||
txq.import(b.tx().sender(1).nonce(1).new()).unwrap();
|
||||
txq.import(b.tx().sender(1).nonce(5).new()).unwrap();
|
||||
|
||||
assert_eq!(txq.status(NonceReady::new(1)), Status {
|
||||
stalled: 2,
|
||||
pending: 2,
|
||||
future: 2,
|
||||
});
|
||||
|
||||
// when
|
||||
assert_eq!(txq.cull(None, NonceReady::new(1)), 2);
|
||||
|
||||
// then
|
||||
assert_eq!(txq.status(NonceReady::new(1)), Status {
|
||||
stalled: 0,
|
||||
pending: 2,
|
||||
future: 2,
|
||||
});
|
||||
assert_eq!(txq.light_status(), LightStatus {
|
||||
transaction_count: 4,
|
||||
senders: 2,
|
||||
mem_usage: 0,
|
	});
}

#[test]
fn should_cull_stalled_transactions_from_a_sender() {
	// given
	let b = TransactionBuilder::default();
	let mut txq = TestPool::default();

	txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
	txq.import(b.tx().nonce(1).new()).unwrap();

	txq.import(b.tx().sender(1).nonce(0).new()).unwrap();
	txq.import(b.tx().sender(1).nonce(1).new()).unwrap();
	txq.import(b.tx().sender(1).nonce(2).new()).unwrap();

	assert_eq!(txq.status(NonceReady::new(2)), Status {
		stalled: 4,
		pending: 1,
		future: 0,
	});

	// when
	let sender = 0.into();
	assert_eq!(txq.cull(Some(&[sender]), NonceReady::new(2)), 2);

	// then
	assert_eq!(txq.status(NonceReady::new(2)), Status {
		stalled: 2,
		pending: 1,
		future: 0,
	});
	assert_eq!(txq.light_status(), LightStatus {
		transaction_count: 3,
		senders: 1,
		mem_usage: 0,
	});
}

#[test]
fn should_re_insert_after_cull() {
	// given
	let b = TransactionBuilder::default();
	let mut txq = TestPool::default();

	txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
	txq.import(b.tx().nonce(1).new()).unwrap();
	txq.import(b.tx().sender(1).nonce(0).new()).unwrap();
	txq.import(b.tx().sender(1).nonce(1).new()).unwrap();
	assert_eq!(txq.status(NonceReady::new(1)), Status {
		stalled: 2,
		pending: 2,
		future: 0,
	});

	// when
	assert_eq!(txq.cull(None, NonceReady::new(1)), 2);
	assert_eq!(txq.status(NonceReady::new(1)), Status {
		stalled: 0,
		pending: 2,
		future: 0,
	});
	txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
	txq.import(b.tx().sender(1).nonce(0).new()).unwrap();

	assert_eq!(txq.status(NonceReady::new(1)), Status {
		stalled: 2,
		pending: 2,
		future: 0,
	});
}

#[test]
fn should_return_worst_transaction() {
	// given
	let b = TransactionBuilder::default();
	let mut txq = TestPool::default();
	assert!(txq.worst_transaction().is_none());

	// when
	txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
	txq.import(b.tx().sender(1).nonce(0).gas_price(4).new()).unwrap();

	// then
	assert_eq!(txq.worst_transaction().unwrap().gas_price, 4.into());
}

#[test]
fn should_return_is_full() {
	// given
	let b = TransactionBuilder::default();
	let mut txq = TestPool::with_limit(2);
	assert!(!txq.is_full());

	// when
	txq.import(b.tx().nonce(0).gas_price(110).new()).unwrap();
	assert!(!txq.is_full());

	txq.import(b.tx().sender(1).nonce(0).gas_price(100).new()).unwrap();

	// then
	assert!(txq.is_full());
}

#[test]
fn should_import_even_if_limit_is_reached_and_should_replace_returns_insert_new() {
	// given
	let b = TransactionBuilder::default();
	let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options {
		max_count: 1,
		..Default::default()
	});
	txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
	assert_eq!(txq.light_status(), LightStatus {
		transaction_count: 1,
		senders: 1,
		mem_usage: 0,
	});

	// when
	txq.import(b.tx().nonce(1).gas_price(5).new()).unwrap();

	// then
	assert_eq!(txq.light_status(), LightStatus {
		transaction_count: 2,
		senders: 1,
		mem_usage: 0,
	});
}

#[test]
fn should_not_import_even_if_limit_is_reached_and_should_replace_returns_false() {
	// given
	let b = TransactionBuilder::default();
	let mut txq = TestPool::with_scoring(DummyScoring::default(), Options {
		max_count: 1,
		..Default::default()
	});
	txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
	assert_eq!(txq.light_status(), LightStatus {
		transaction_count: 1,
		senders: 1,
		mem_usage: 0,
	});

	// when
	let err = txq.import(b.tx().nonce(1).gas_price(5).new()).unwrap_err();

	// then
	assert_eq!(err.kind(),
		&error::ErrorKind::TooCheapToEnter("0x00000000000000000000000000000000000000000000000000000000000001f5".into(), "0x5".into()));
	assert_eq!(txq.light_status(), LightStatus {
		transaction_count: 1,
		senders: 1,
		mem_usage: 0,
	});
}

#[test]
fn should_import_even_if_sender_limit_is_reached() {
	// given
	let b = TransactionBuilder::default();
	let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options {
		max_count: 1,
		max_per_sender: 1,
		..Default::default()
	});
	txq.import(b.tx().nonce(0).gas_price(5).new()).unwrap();
	assert_eq!(txq.light_status(), LightStatus {
		transaction_count: 1,
		senders: 1,
		mem_usage: 0,
	});

	// when
	txq.import(b.tx().nonce(1).gas_price(5).new()).unwrap();

	// then
	assert_eq!(txq.light_status(), LightStatus {
		transaction_count: 2,
		senders: 1,
		mem_usage: 0,
	});
}

mod listener {
	use std::cell::RefCell;
	use std::rc::Rc;

	use super::*;

	#[derive(Default)]
	struct MyListener(pub Rc<RefCell<Vec<&'static str>>>);

	impl Listener<Transaction> for MyListener {
		fn added(&mut self, _tx: &SharedTransaction, old: Option<&SharedTransaction>) {
			self.0.borrow_mut().push(if old.is_some() { "replaced" } else { "added" });
		}

		fn rejected(&mut self, _tx: &SharedTransaction, _reason: &error::ErrorKind) {
			self.0.borrow_mut().push("rejected".into());
		}

		fn dropped(&mut self, _tx: &SharedTransaction, _new: Option<&Transaction>) {
			self.0.borrow_mut().push("dropped".into());
		}

		fn invalid(&mut self, _tx: &SharedTransaction) {
			self.0.borrow_mut().push("invalid".into());
		}

		fn canceled(&mut self, _tx: &SharedTransaction) {
			self.0.borrow_mut().push("canceled".into());
		}

		fn culled(&mut self, _tx: &SharedTransaction) {
			self.0.borrow_mut().push("culled".into());
		}
	}

	#[test]
	fn insert_transaction() {
		let b = TransactionBuilder::default();
		let listener = MyListener::default();
		let results = listener.0.clone();
		let mut txq = Pool::new(listener, DummyScoring::default(), Options {
			max_per_sender: 1,
			max_count: 2,
			..Default::default()
		});
		assert!(results.borrow().is_empty());

		// Regular import
		txq.import(b.tx().nonce(1).new()).unwrap();
		assert_eq!(*results.borrow(), &["added"]);
		// Already present (no notification)
		txq.import(b.tx().nonce(1).new()).unwrap_err();
		assert_eq!(*results.borrow(), &["added"]);
		// Push out the first one
		txq.import(b.tx().nonce(1).gas_price(1).new()).unwrap();
		assert_eq!(*results.borrow(), &["added", "replaced"]);
		// Reject
		txq.import(b.tx().nonce(1).new()).unwrap_err();
		assert_eq!(*results.borrow(), &["added", "replaced", "rejected"]);
		results.borrow_mut().clear();
		// Different sender (accept)
		txq.import(b.tx().sender(1).nonce(1).gas_price(2).new()).unwrap();
		assert_eq!(*results.borrow(), &["added"]);
		// Third sender push out low gas price
		txq.import(b.tx().sender(2).nonce(1).gas_price(4).new()).unwrap();
		assert_eq!(*results.borrow(), &["added", "dropped", "added"]);
		// Reject (too cheap)
		txq.import(b.tx().sender(2).nonce(1).gas_price(2).new()).unwrap_err();
		assert_eq!(*results.borrow(), &["added", "dropped", "added", "rejected"]);

		assert_eq!(txq.light_status().transaction_count, 2);
	}

	#[test]
	fn remove_transaction() {
		let b = TransactionBuilder::default();
		let listener = MyListener::default();
		let results = listener.0.clone();
		let mut txq = Pool::new(listener, DummyScoring::default(), Options::default());

		// insert
		let tx1 = txq.import(b.tx().nonce(1).new()).unwrap();
		let tx2 = txq.import(b.tx().nonce(2).new()).unwrap();

		// then
		txq.remove(&tx1.hash(), false);
		assert_eq!(*results.borrow(), &["added", "added", "canceled"]);
		txq.remove(&tx2.hash(), true);
		assert_eq!(*results.borrow(), &["added", "added", "canceled", "invalid"]);
		assert_eq!(txq.light_status().transaction_count, 0);
	}

	#[test]
	fn clear_queue() {
		let b = TransactionBuilder::default();
		let listener = MyListener::default();
		let results = listener.0.clone();
		let mut txq = Pool::new(listener, DummyScoring::default(), Options::default());

		// insert
		txq.import(b.tx().nonce(1).new()).unwrap();
		txq.import(b.tx().nonce(2).new()).unwrap();

		// when
		txq.clear();

		// then
		assert_eq!(*results.borrow(), &["added", "added", "dropped", "dropped"]);
	}

	#[test]
	fn cull_stalled() {
		let b = TransactionBuilder::default();
		let listener = MyListener::default();
		let results = listener.0.clone();
		let mut txq = Pool::new(listener, DummyScoring::default(), Options::default());

		// insert
		txq.import(b.tx().nonce(1).new()).unwrap();
		txq.import(b.tx().nonce(2).new()).unwrap();

		// when
		txq.cull(None, NonceReady::new(3));

		// then
		assert_eq!(*results.borrow(), &["added", "added", "culled", "culled"]);
	}
}
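The tests above drive culling through a nonce-based readiness check, `NonceReady`, whose implementation is not shown in this diff. Below is a minimal illustrative sketch of such a `Ready` implementation; the trait signature and the `Readiness` variant names are assumed from how `cull` and `status` use them elsewhere in this change, and the real helper in the crate may also track per-sender state.

// Sketch only: a readiness check that treats any nonce below `min` as already mined.
struct NonceReady(U256);

impl NonceReady {
	fn new<T: Into<U256>>(min: T) -> Self {
		NonceReady(min.into())
	}
}

impl Ready<Transaction> for NonceReady {
	fn is_ready(&mut self, tx: &Transaction) -> Readiness {
		if tx.nonce < self.0 {
			// Below the threshold: considered stale and eligible for culling.
			Readiness::Stale
		} else if tx.nonce == self.0 {
			Readiness::Ready
		} else {
			Readiness::Future
		}
	}
}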
@ -1,64 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use super::{Transaction, U256, Address};

#[derive(Debug, Default, Clone)]
pub struct TransactionBuilder {
	nonce: U256,
	gas_price: U256,
	gas: U256,
	sender: Address,
	mem_usage: usize,
}

impl TransactionBuilder {
	pub fn tx(&self) -> Self {
		self.clone()
	}

	pub fn nonce<T: Into<U256>>(mut self, nonce: T) -> Self {
		self.nonce = nonce.into();
		self
	}

	pub fn gas_price<T: Into<U256>>(mut self, gas_price: T) -> Self {
		self.gas_price = gas_price.into();
		self
	}

	pub fn sender<T: Into<Address>>(mut self, sender: T) -> Self {
		self.sender = sender.into();
		self
	}

	pub fn mem_usage(mut self, mem_usage: usize) -> Self {
		self.mem_usage = mem_usage;
		self
	}

	pub fn new(self) -> Transaction {
		let hash = self.nonce ^ (U256::from(100) * self.gas_price) ^ (U256::from(100_000) * U256::from(self.sender.low_u64()));
		Transaction {
			hash: hash.into(),
			nonce: self.nonce,
			gas_price: self.gas_price,
			gas: 21_000.into(),
			sender: self.sender,
			mem_usage: self.mem_usage,
		}
	}
}
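The builder is consumed fluently in the tests above: `tx()` clones the template, so one builder can stamp out many related transactions. A small usage sketch, mirroring how the tests call it (the assertion values follow from `new()` as defined above):

fn build_example() {
	let b = TransactionBuilder::default();
	// Each call to `tx()` starts from a fresh copy of the template.
	let tx = b.tx().sender(1).nonce(0).gas_price(5).new();
	assert_eq!(tx.gas_price, 5.into());
	assert_eq!(tx.gas, 21_000.into());
}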
@ -1,221 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::{fmt, mem};

use smallvec::SmallVec;

use ready::{Ready, Readiness};
use scoring::{self, Scoring};
use pool::Transaction;

#[derive(Debug)]
pub enum AddResult<T, S> {
	Ok(T),
	TooCheapToEnter(T, S),
	TooCheap {
		old: T,
		new: T,
	},
	Replaced {
		old: T,
		new: T,
	},
	PushedOut {
		old: T,
		new: T,
	},
}

/// Represents all transactions from a particular sender ordered by nonce.
const PER_SENDER: usize = 8;
#[derive(Debug)]
pub struct Transactions<T, S: Scoring<T>> {
	// TODO [ToDr] Consider using something that doesn't require shifting all records.
	transactions: SmallVec<[Transaction<T>; PER_SENDER]>,
	scores: SmallVec<[S::Score; PER_SENDER]>,
}

impl<T, S: Scoring<T>> Default for Transactions<T, S> {
	fn default() -> Self {
		Transactions {
			transactions: Default::default(),
			scores: Default::default(),
		}
	}
}

impl<T: fmt::Debug, S: Scoring<T>> Transactions<T, S> {
	pub fn is_empty(&self) -> bool {
		self.transactions.is_empty()
	}

	pub fn len(&self) -> usize {
		self.transactions.len()
	}

	pub fn iter(&self) -> ::std::slice::Iter<Transaction<T>> {
		self.transactions.iter()
	}

	pub fn worst_and_best(&self) -> Option<((S::Score, Transaction<T>), (S::Score, Transaction<T>))> {
		let len = self.scores.len();
		self.scores.get(0).cloned().map(|best| {
			let worst = self.scores[len - 1].clone();
			let best_tx = self.transactions[0].clone();
			let worst_tx = self.transactions[len - 1].clone();

			((worst, worst_tx), (best, best_tx))
		})
	}

	pub fn find_next(&self, tx: &T, scoring: &S) -> Option<(S::Score, Transaction<T>)> {
		self.transactions.binary_search_by(|old| scoring.compare(old, &tx)).ok().and_then(|index| {
			let index = index + 1;
			if index < self.scores.len() {
				Some((self.scores[index].clone(), self.transactions[index].clone()))
			} else {
				None
			}
		})
	}

	fn push_cheapest_transaction(&mut self, tx: Transaction<T>, scoring: &S, max_count: usize) -> AddResult<Transaction<T>, S::Score> {
		let index = self.transactions.len();
		if index == max_count && !scoring.should_ignore_sender_limit(&tx) {
			let min_score = self.scores[index - 1].clone();
			AddResult::TooCheapToEnter(tx, min_score)
		} else {
			self.transactions.push(tx.clone());
			self.scores.push(Default::default());
			scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::InsertedAt(index));

			AddResult::Ok(tx)
		}
	}

	pub fn update_scores(&mut self, scoring: &S, event: S::Event) {
		scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::Event(event));
	}

	pub fn add(&mut self, new: Transaction<T>, scoring: &S, max_count: usize) -> AddResult<Transaction<T>, S::Score> {
		let index = match self.transactions.binary_search_by(|old| scoring.compare(old, &new)) {
			Ok(index) => index,
			Err(index) => index,
		};

		// Insert at the end.
		if index == self.transactions.len() {
			return self.push_cheapest_transaction(new, scoring, max_count)
		}

		// Decide if the transaction should replace some other.
		match scoring.choose(&self.transactions[index], &new) {
			// New transaction should be rejected
			scoring::Choice::RejectNew => AddResult::TooCheap {
				old: self.transactions[index].clone(),
				new,
			},
			// New transaction should be kept along with old ones.
			scoring::Choice::InsertNew => {
				self.transactions.insert(index, new.clone());
				self.scores.insert(index, Default::default());
				scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::InsertedAt(index));

				if self.transactions.len() > max_count {
					let old = self.transactions.pop().expect("len is non-zero");
					self.scores.pop();
					scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::RemovedAt(self.transactions.len()));

					AddResult::PushedOut {
						old,
						new,
					}
				} else {
					AddResult::Ok(new)
				}
			},
			// New transaction is replacing some other transaction already in the queue.
			scoring::Choice::ReplaceOld => {
				let old = mem::replace(&mut self.transactions[index], new.clone());
				scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::ReplacedAt(index));

				AddResult::Replaced {
					old,
					new,
				}
			},
		}
	}

	pub fn remove(&mut self, tx: &T, scoring: &S) -> bool {
		let index = match self.transactions.binary_search_by(|old| scoring.compare(old, tx)) {
			Ok(index) => index,
			Err(_) => {
				warn!("Attempting to remove non-existent transaction {:?}", tx);
				return false;
			},
		};

		self.transactions.remove(index);
		self.scores.remove(index);
		// Update scoring
		scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::RemovedAt(index));
		return true;
	}

	pub fn cull<R: Ready<T>>(&mut self, ready: &mut R, scoring: &S) -> SmallVec<[Transaction<T>; PER_SENDER]> {
		let mut result = SmallVec::new();
		if self.is_empty() {
			return result;
		}

		let mut first_non_stalled = 0;
		for tx in &self.transactions {
			match ready.is_ready(tx) {
				Readiness::Stale => {
					first_non_stalled += 1;
				},
				Readiness::Ready | Readiness::Future => break,
			}
		}

		if first_non_stalled == 0 {
			return result;
		}

		// reverse the vectors to easily remove first elements.
		self.transactions.reverse();
		self.scores.reverse();

		for _ in 0..first_non_stalled {
			self.scores.pop();
			result.push(
				self.transactions.pop().expect("first_non_stalled is never greater than transactions.len(); qed")
			);
		}

		self.transactions.reverse();
		self.scores.reverse();

		// update scoring
		scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::Culled(result.len()));

		// reverse the result to maintain correct order.
		result.reverse();
		result
	}
}
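`Transactions::add` reports the outcome of every insertion through `AddResult`. The following is a hedged sketch of how a caller might summarise each variant; the descriptions are derived from the enum and the `add` logic above, while the function itself is illustrative and not the pool's real wiring.

// Sketch only: map each insertion outcome to a human-readable summary.
fn describe<T, S>(result: &AddResult<T, S>) -> &'static str {
	match *result {
		AddResult::Ok(_) => "inserted",
		AddResult::TooCheapToEnter(..) => "rejected: sender is at capacity and the score is too low to enter",
		AddResult::TooCheap { .. } => "rejected: an existing transaction at this position scored better",
		AddResult::Replaced { .. } => "inserted: replaced an existing transaction at the same position",
		AddResult::PushedOut { .. } => "inserted: the cheapest transaction was evicted to stay under max_count",
	}
}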
@ -1,31 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use {VerifiedTransaction};

/// Transaction verification.
///
/// The verifier is responsible for deciding whether the transaction should even be considered for pool inclusion.
pub trait Verifier<U> {
	/// Verification error.
	type Error;

	/// Verified transaction.
	type VerifiedTransaction: VerifiedTransaction;

	/// Verifies an `UnverifiedTransaction` and produces a `VerifiedTransaction` instance.
	fn verify_transaction(&self, tx: U) -> Result<Self::VerifiedTransaction, Self::Error>;
}
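A minimal sketch of implementing the trait above: a pass-through verifier that accepts any input already satisfying `VerifiedTransaction`. This is illustrative only; a real verifier would check signatures, balances and gas limits before admitting a transaction to the pool.

// Sketch only: accept every transaction unchanged.
struct AcceptAll;

impl<T: VerifiedTransaction> Verifier<T> for AcceptAll {
	type Error = ();
	type VerifiedTransaction = T;

	fn verify_transaction(&self, tx: T) -> Result<Self::VerifiedTransaction, Self::Error> {
		Ok(tx)
	}
}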
@ -6,22 +6,22 @@ license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
keccak-hash = "0.1"
lazy_static = "1.0"
log = "0.4"
ethabi = "5.1"
ethabi-derive = "5.0"
ethabi-contract = "5.0"
ethabi = "5.1.2"
ethabi-derive = "5.1.3"
ethabi-contract = "5.1.1"
target_info = "0.1"
semver = "0.9"
ethcore = { path = "../ethcore" }
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
parity-bytes = "0.1"
ethcore-sync = { path = "../ethcore/sync" }
ethereum-types = "0.3"
ethereum-types = "0.4"
parking_lot = "0.6"
parity-hash-fetch = { path = "../hash-fetch" }
parity-version = { path = "../util/version" }
path = { git = "https://github.com/paritytech/parity-common" }
parity-path = "0.1"
rand = "0.4"

[dev-dependencies]

@ -27,7 +27,7 @@ extern crate keccak_hash as hash;
extern crate parity_hash_fetch as hash_fetch;
extern crate parity_version as version;
extern crate parking_lot;
extern crate path;
extern crate parity_path;
extern crate rand;
extern crate semver;
extern crate target_info;

@ -31,7 +31,7 @@ use ethcore::client::{BlockId, BlockChainClient, ChainNotify, ChainRoute};
use ethcore::filter::Filter;
use ethereum_types::H256;
use hash_fetch::{self as fetch, HashFetch};
use path::restrict_permissions_owner;
use parity_path::restrict_permissions_owner;
use service::Service;
use sync::{SyncProvider};
use types::{ReleaseInfo, OperationsInfo, CapState, VersionInfo, ReleaseTrack};

@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
license = "GPL3"

[dependencies]
ethereum-types = "0.3"
ethereum-types = "0.4"
journaldb = { path = "../journaldb" }
app_dirs = { git = "https://github.com/paritytech/app-dirs-rs" }
home = "0.3"

@ -6,5 +6,5 @@ license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
ethereum-types = "0.3"
ethereum-types = "0.4"
ethkey = { path = "../../ethkey" }

@ -6,5 +6,5 @@ description = "Specialized version of `HashMap` with H256 keys and fast hashing
license = "GPL-3.0"

[dependencies]
ethereum-types = "0.3"
plain_hasher = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.4"
plain_hasher = "0.2"

@ -6,19 +6,19 @@ description = "A `HashDB` which can manage a short-term journal potentially cont
license = "GPL3"

[dependencies]
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
hashdb = { git = "https://github.com/paritytech/parity-common" }
parity-bytes = "0.1"
ethereum-types = "0.4"
hashdb = "0.2.1"
heapsize = "0.4"
keccak-hasher = { path = "../keccak-hasher" }
kvdb = "0.1.0"
kvdb = "0.1"
log = "0.4"
memorydb = { git = "https://github.com/paritytech/parity-common" }
memorydb = "0.2.1"
parking_lot = "0.6"
fastmap = { path = "../../util/fastmap" }
rlp = { git = "https://github.com/paritytech/parity-common" }
rlp = { version = "0.2.4", features = ["ethereum"] }

[dev-dependencies]
ethcore-logger = { path = "../../logger" }
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
kvdb-memorydb = "0.1.0"
keccak-hash = "0.1"
kvdb-memorydb = "0.1"

@ -1,12 +1,12 @@
[package]
name = "keccak-hasher"
version = "0.1.0"
version = "0.1.1"
authors = ["Parity Technologies <admin@parity.io>"]
description = "Keccak-256 implementation of the Hasher trait"
license = "GPL-3.0"

[dependencies]
ethereum-types = "0.3"
ethereum-types = "0.4"
tiny-keccak = "1.4.2"
hashdb = { git = "https://github.com/paritytech/parity-common" }
plain_hasher = { git = "https://github.com/paritytech/parity-common" }
hashdb = "0.2.1"
plain_hasher = "0.2"

@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
log = "0.4"
macros = { path = "../macros" }
kvdb = "0.1.0"
kvdb = "0.1"
kvdb-rocksdb = "0.1.3"

[dev-dependencies]

@ -20,17 +20,17 @@ parking_lot = "0.6"
ansi_term = "0.10"
rustc-hex = "1.0"
ethcore-io = { path = "../io", features = ["mio"] }
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
parity-bytes = "0.1"
parity-crypto = "0.1"
ethcore-logger = { path ="../../logger" }
ethcore-network = { path = "../network" }
ethereum-types = "0.3"
ethereum-types = "0.4"
ethkey = { path = "../../ethkey" }
rlp = { git = "https://github.com/paritytech/parity-common" }
path = { git = "https://github.com/paritytech/parity-common" }
rlp = { version = "0.2.4", features = ["ethereum"] }
parity-path = "0.1"
ipnetwork = "0.12.6"
keccak-hash = { git = "https://github.com/paritytech/parity-common" }
parity-snappy = "0.1.0"
keccak-hash = "0.1"
parity-snappy = "0.1"
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"

@ -43,7 +43,7 @@ use network::{NonReservedPeerMode, NetworkContext as NetworkContextTrait};
use network::{SessionInfo, Error, ErrorKind, DisconnectReason, NetworkProtocolHandler};
use discovery::{Discovery, TableUpdates, NodeEntry, MAX_DATAGRAM_SIZE};
use ip_utils::{map_external_address, select_public_address};
use path::restrict_permissions_owner;
use parity_path::restrict_permissions_owner;
use parking_lot::{Mutex, RwLock};
use network::{ConnectionFilter, ConnectionDirection};

@ -77,7 +77,7 @@ extern crate slab;
extern crate ethkey;
extern crate rlp;
extern crate bytes;
extern crate path;
extern crate parity_path;
extern crate ethcore_logger;
extern crate ethcore_network as network;
extern crate ipnetwork;

@ -8,15 +8,14 @@ authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
error-chain = { version = "0.12", default-features = false }
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
parity-crypto = "0.1"
ethcore-io = { path = "../io" }
ethereum-types = "0.3"
ethereum-types = "0.4"
ethkey = { path = "../../ethkey" }
ipnetwork = "0.12.6"
rlp = { git = "https://github.com/paritytech/parity-common" }
rlp = { version = "0.2.4", features = ["ethereum"] }
libc = "0.2"
parity-snappy = "0.1.0"

parity-snappy = "0.1"

[dev-dependencies]
assert_matches = "1.2"

@ -6,10 +6,14 @@ description = "Merkle-Patricia Trie (Ethereum Style)"
license = "GPL-3.0"

[dependencies]
patricia-trie = { git = "https://github.com/paritytech/parity-common" }
keccak-hasher = { path = "../keccak-hasher" }
hashdb = { git = "https://github.com/paritytech/parity-common" }
rlp = { git = "https://github.com/paritytech/parity-common" }
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
elastic-array = "0.10"
patricia-trie = "0.2.1"
keccak-hasher = { version = "0.1.1", path = "../keccak-hasher" }
hashdb = "0.2"
rlp = { version = "0.2.4", features = ["ethereum"] }
parity-bytes = "0.1"
ethereum-types = "0.4"
elastic-array = "0.10"

[dev-dependencies]
memorydb = "0.2.1"
keccak-hash = "0.1.2"
@ -36,6 +36,36 @@ use rlp::DecoderError;
pub type RlpCodec = RlpNodeCodec<KeccakHasher>;

/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieDB`
///
/// Use it as a `Trie` trait object. You can use `db()` to get the backing database object.
/// Use `get` and `contains` to query values associated with keys in the trie.
///
/// # Example
/// ```
/// extern crate patricia_trie as trie;
/// extern crate patricia_trie_ethereum as ethtrie;
/// extern crate hashdb;
/// extern crate keccak_hasher;
/// extern crate memorydb;
/// extern crate ethereum_types;
///
/// use trie::*;
/// use hashdb::*;
/// use keccak_hasher::KeccakHasher;
/// use memorydb::*;
/// use ethereum_types::H256;
/// use ethtrie::{TrieDB, TrieDBMut};
///
///
/// fn main() {
///   let mut memdb = MemoryDB::<KeccakHasher>::new();
///   let mut root = H256::new();
///   TrieDBMut::new(&mut memdb, &mut root).insert(b"foo", b"bar").unwrap();
///   let t = TrieDB::new(&memdb, &root).unwrap();
///   assert!(t.contains(b"foo").unwrap());
///   assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar"));
/// }
/// ```
pub type TrieDB<'db> = trie::TrieDB<'db, KeccakHasher, RlpCodec>;

/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `SecTrieDB`

@ -45,6 +75,41 @@ pub type SecTrieDB<'db> = trie::SecTrieDB<'db, KeccakHasher, RlpCodec>;
pub type FatDB<'db> = trie::FatDB<'db, KeccakHasher, RlpCodec>;

/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieDBMut`
///
/// Use it as a `TrieMut` trait object. You can use `db()` to get the backing database object.
/// Note that changes are not committed to the database until `commit` is called.
/// Querying the root or dropping the trie will commit automatically.

/// # Example
/// ```
/// extern crate patricia_trie as trie;
/// extern crate patricia_trie_ethereum as ethtrie;
/// extern crate hashdb;
/// extern crate keccak_hash;
/// extern crate keccak_hasher;
/// extern crate memorydb;
/// extern crate ethereum_types;
///
/// use keccak_hash::KECCAK_NULL_RLP;
/// use ethtrie::{TrieDBMut, trie::TrieMut};
/// use hashdb::DBValue;
/// use keccak_hasher::KeccakHasher;
/// use memorydb::*;
/// use ethereum_types::H256;
///
/// fn main() {
///   let mut memdb = MemoryDB::<KeccakHasher>::new();
///   let mut root = H256::new();
///   let mut t = TrieDBMut::new(&mut memdb, &mut root);
///   assert!(t.is_empty());
///   assert_eq!(*t.root(), KECCAK_NULL_RLP);
///   t.insert(b"foo", b"bar").unwrap();
///   assert!(t.contains(b"foo").unwrap());
///   assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar"));
///   t.remove(b"foo").unwrap();
///   assert!(!t.contains(b"foo").unwrap());
/// }
/// ```
pub type TrieDBMut<'db> = trie::TrieDBMut<'db, KeccakHasher, RlpCodec>;

/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `SecTrieDBMut`
@ -4,6 +4,6 @@ version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
rlp = { git = "https://github.com/paritytech/parity-common" }
rlp = "0.2.4"
elastic-array = "0.10"
lazy_static = "1.0"

@ -12,4 +12,4 @@ syn = "0.13"
quote = "0.5"

[dev-dependencies]
rlp = { git = "https://github.com/paritytech/parity-common" }
rlp = "0.2.4"

@ -1,9 +0,0 @@
[package]
name = "trace-time"
description = "Easily trace time to execute a scope."
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "GPL-3.0"

[dependencies]
log = "0.4"
@ -1,55 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Performance timer with logging

#[macro_use]
extern crate log;

use std::time::Instant;

#[macro_export]
macro_rules! trace_time {
	($name: expr) => {
		let _timer = $crate::PerfTimer::new($name);
	}
}

/// Performance timer with logging. Starts measuring time when constructed and
/// prints the elapsed time when dropped.
pub struct PerfTimer {
	name: &'static str,
	start: Instant,
}

impl PerfTimer {
	/// Create an instance with given name.
	pub fn new(name: &'static str) -> PerfTimer {
		PerfTimer {
			name,
			start: Instant::now(),
		}
	}
}

impl Drop for PerfTimer {
	fn drop(&mut self) {
		let elapsed = self.start.elapsed();
		let ms = elapsed.subsec_nanos() as f32 / 1_000_000.0 +
			elapsed.as_secs() as f32 * 1_000.0;
		trace!(target: "perf", "{}: {:.2}ms", self.name, ms);
	}
}
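Usage sketch for the macro above: the timer is bound to a hidden local, so it is dropped when the enclosing scope ends and the elapsed milliseconds are logged at `trace` level under the `perf` target (assuming a logger has been initialised). The function name below is only an example.

fn import_block() {
	trace_time!("import_block");
	// ... expensive work here; the elapsed time is traced when this scope exits.
}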
@ -6,6 +6,6 @@ description = "Trie-root helpers, ethereum style"
license = "GPL-3.0"

[dependencies]
triehash = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
keccak-hasher = { path = "../keccak-hasher" }
triehash = { version = "0.2.3", features = ["ethereum"] }
ethereum-types = "0.4"
keccak-hasher = { path = "../keccak-hasher" }

@ -21,8 +21,8 @@ ropsten = { forkBlock = 10, critical = false }
kovan = { forkBlock = 6600000, critical = false }

[dependencies]
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
rlp = { git = "https://github.com/paritytech/parity-common" }
parity-bytes = "0.1"
rlp = { version = "0.2.4", features = ["ethereum"] }
target_info = "0.1"

[build-dependencies]

@ -7,9 +7,9 @@ description = "Whisper Protocol implementation for Parity"
[dependencies]
bitflags = "0.9"
byteorder = "1.0.0"
ethereum-types = "0.3"
ethereum-types = "0.4"
ethcore-network = { path = "../util/network" }
parity-crypto = { git = "https://github.com/paritytech/parity-common" }
parity-crypto = "0.1"
ethkey = { path = "../ethkey" }
hex = "0.2"
log = "0.4"

@ -17,7 +17,7 @@ mem = { path = "../util/mem" }
ordered-float = "0.5"
parking_lot = "0.6"
rand = "0.4"
rlp = { git = "https://github.com/paritytech/parity-common" }
rlp = { version = "0.2.4", features = ["ethereum"] }
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"

@ -19,7 +19,7 @@
use std::fmt;
use std::ops::Deref;

use ethereum_types::{H32, H64, H128, H256, H264, H512, H1024};
use ethereum_types::{H32, H64, H128, H256, H264, H512};
use hex::{ToHex, FromHex};

use serde::{Serialize, Serializer, Deserialize, Deserializer};

@ -51,7 +51,7 @@ macro_rules! impl_hex_for_hash {
}

impl_hex_for_hash!(
	H32 H64 H128 H256 H264 H512 H1024
	H32 H64 H128 H256 H264 H512
);

/// Wrapper structure around hex-encoded data.