Merge branch 'master' into client-ipc-refact

Commit 97eb311cb0 by NikVolf, 2016-06-30 20:32:13 +03:00
34 changed files with 463 additions and 91 deletions

Cargo.lock (generated)
View File

@@ -261,6 +261,7 @@ dependencies = [
 "ethjson 0.1.0",
 "ethstore 0.1.0",
 "heapsize 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
 "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -285,7 +286,7 @@ dependencies = [
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-dapps-rs.git)",
-"parity-dapps-builtins 0.5.1 (git+https://github.com/ethcore/parity-dapps-builtins-rs.git)",
+"parity-dapps-builtins 0.5.2 (git+https://github.com/ethcore/parity-dapps-builtins-rs.git)",
 "parity-dapps-status 0.5.0 (git+https://github.com/ethcore/parity-dapps-status-rs.git)",
 "parity-dapps-wallet 0.6.1 (git+https://github.com/ethcore/parity-dapps-wallet-rs.git)",
 "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -378,6 +379,7 @@ dependencies = [
 name = "ethcore-util"
 version = "1.3.0"
 dependencies = [
+"ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
 "bigint 0.1.0",
 "chrono 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -549,7 +551,7 @@ dependencies = [
 [[package]]
 name = "hyper"
 version = "0.9.4"
-source = "git+https://github.com/ethcore/hyper#7ccfcb2aa7e6aa6300efa8cebd6a0e6ce55582ea"
+source = "git+https://github.com/ethcore/hyper#9e346c1d4bc30cd4142dea9d8a0b117d30858ca4"
 dependencies = [
 "cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -558,7 +560,7 @@ dependencies = [
 "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "rotor 0.6.3 (git+https://github.com/ethcore/rotor)",
 "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
-"spmc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
 "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -911,8 +913,8 @@ dependencies = [
 [[package]]
 name = "parity-dapps-builtins"
-version = "0.5.1"
-source = "git+https://github.com/ethcore/parity-dapps-builtins-rs.git#7408838e8ca3b57c6b0cf5da2e31e0e275959955"
+version = "0.5.2"
+source = "git+https://github.com/ethcore/parity-dapps-builtins-rs.git#01af2091d5d70dfe0aecbfd96308f0ae79fc61e6"
 dependencies = [
 "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-dapps-rs.git)",
 ]
@@ -1224,7 +1226,7 @@ dependencies = [
 [[package]]
 name = "spmc"
-version = "0.2.0"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]

View File

@@ -23,7 +23,7 @@ ethcore-util = { path = "../util" }
 parity-dapps = { git = "https://github.com/ethcore/parity-dapps-rs.git", version = "0.3" }
 # List of apps
 parity-dapps-status = { git = "https://github.com/ethcore/parity-dapps-status-rs.git", version = "0.5.0" }
-parity-dapps-builtins = { git = "https://github.com/ethcore/parity-dapps-builtins-rs.git", version = "0.5.0" }
+parity-dapps-builtins = { git = "https://github.com/ethcore/parity-dapps-builtins-rs.git", version = "0.5.2" }
 parity-dapps-wallet = { git = "https://github.com/ethcore/parity-dapps-wallet-rs.git", version = "0.6.0", optional = true }
 parity-dapps-dao = { git = "https://github.com/ethcore/parity-dapps-dao-rs.git", version = "0.4.0", optional = true }
 parity-dapps-makerotc = { git = "https://github.com/ethcore/parity-dapps-makerotc-rs.git", version = "0.3.0", optional = true }

View File

@@ -33,6 +33,10 @@ rayon = "0.3.1"
 ethstore = { path = "../ethstore" }
 semver = "0.2"
+
+[dependencies.hyper]
+git = "https://github.com/ethcore/hyper"
+default-features = false
 
 [features]
 jit = ["evmjit"]
 evm-debug = []

View File

@@ -108,6 +108,12 @@ impl Account {
 self.code_cache = code;
 }
 
+/// Reset this account's code to the given code.
+pub fn reset_code(&mut self, code: Bytes) {
+self.code_hash = None;
+self.init_code(code);
+}
+
 /// Set (and cache) the contents of the trie's storage at `key` to `value`.
 pub fn set_storage(&mut self, key: H256, value: H256) {
 self.storage_overlay.borrow_mut().insert(key, (Filth::Dirty, value));
@@ -336,6 +342,21 @@ mod tests {
 assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb");
 }
 
+#[test]
+fn reset_code() {
+let mut a = Account::new_contract(69.into(), 0.into());
+let mut db = MemoryDB::new();
+let mut db = AccountDBMut::new(&mut db, &Address::new());
+a.init_code(vec![0x55, 0x44, 0xffu8]);
+assert_eq!(a.code_hash(), SHA3_EMPTY);
+a.commit_code(&mut db);
+assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb");
+a.reset_code(vec![0x55]);
+assert_eq!(a.code_hash(), SHA3_EMPTY);
+a.commit_code(&mut db);
+assert_eq!(a.code_hash().hex(), "37bf2238b11b68cdc8382cece82651b59d3c3988873b6e0f33d79694aa45f1be");
+}
+
 #[test]
 fn rlpio() {
 let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
@@ -348,7 +369,6 @@ mod tests {
 #[test]
 fn new_account() {
 let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
 assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
 assert_eq!(a.balance(), &U256::from(69u8));
@@ -359,7 +379,6 @@ mod tests {
 #[test]
 fn create_account() {
 let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
 assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
 }

View File

@@ -164,6 +164,12 @@ pub trait IsBlock {
 fn uncles(&self) -> &Vec<Header> { &self.block().base.uncles }
 }
 
+/// Trait for an object that has a state database.
+pub trait Drain {
+/// Drop this object and return the underlying database.
+fn drain(self) -> Box<JournalDB>;
+}
+
 impl IsBlock for ExecutedBlock {
 fn block(&self) -> &ExecutedBlock { self }
 }
@@ -436,9 +442,11 @@ impl LockedBlock {
 _ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }),
 }
 }
+}
 
+impl Drain for LockedBlock {
 /// Drop this object and return the underlying database.
-pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
+fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
 }
 
 impl SealedBlock {
@@ -450,9 +458,11 @@ impl SealedBlock {
 block_rlp.append_raw(&self.uncle_bytes, 1);
 block_rlp.out()
 }
+}
 
+impl Drain for SealedBlock {
 /// Drop this object and return the underlying database.
-pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
+fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
 }
 
 impl IsBlock for SealedBlock {
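Since drain() now lives on the Drain trait rather than as an inherent method, code that only needs to recover the database can be written once for both block types. A minimal, hypothetical sketch (the helper name is ours, not part of this commit):

// Works for LockedBlock and SealedBlock alike, since both now implement Drain.
fn take_db<B: Drain>(block: B) -> Box<JournalDB> {
    block.drain()
}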

View File

@@ -37,7 +37,7 @@ use util::numbers::U256;
 use util::Itertools;
 use blockchain::TreeRoute;
 use block_queue::BlockQueueInfo;
-use block::OpenBlock;
+use block::{OpenBlock, SealedBlock};
 use header::{BlockNumber, Header};
 use transaction::{LocalizedTransaction, SignedTransaction};
 use log_entry::LocalizedLogEntry;
@@ -244,4 +244,7 @@ pub trait MiningBlockChainClient : BlockChainClient {
 /// Returns EvmFactory.
 fn vm_factory(&self) -> &EvmFactory;
 
+/// Import sealed block. Skips all verifications.
+fn import_sealed_block(&self, block: SealedBlock) -> ImportResult;
 }

View File

@@ -33,7 +33,7 @@ use miner::{Miner, MinerService};
 use spec::Spec;
 use block_queue::BlockQueueInfo;
-use block::OpenBlock;
+use block::{OpenBlock, SealedBlock};
 use executive::Executed;
 use error::{ExecutionError};
 use trace::LocalizedTrace;
@@ -249,6 +249,10 @@ impl MiningBlockChainClient for TestBlockChainClient {
 fn vm_factory(&self) -> &EvmFactory {
 unimplemented!();
 }
+
+fn import_sealed_block(&self, _block: SealedBlock) -> ImportResult {
+unimplemented!();
+}
 }
 
 impl BlockChainClient for TestBlockChainClient {

View File

@@ -14,9 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-extern crate ethash;
-
-use self::ethash::{quick_get_difficulty, EthashManager, H256 as EH256};
+use ethash::{quick_get_difficulty, EthashManager, H256 as EH256};
 use common::*;
 use block::*;
 use spec::CommonParams;

View File

@@ -91,6 +91,8 @@ extern crate ethjson;
 extern crate bloomchain;
 #[macro_use] extern crate ethcore_ipc as ipc;
 extern crate rayon;
+extern crate hyper;
+extern crate ethash;
 pub extern crate ethstore;
 extern crate semver;

View File

@ -16,8 +16,10 @@
use rayon::prelude::*; use rayon::prelude::*;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::time::{Instant, Duration};
use util::*; use util::*;
use util::Colour::White;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use views::{BlockView, HeaderView}; use views::{BlockView, HeaderView};
use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics}; use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics};
@ -28,6 +30,7 @@ use receipt::{Receipt};
use spec::Spec; use spec::Spec;
use engine::Engine; use engine::Engine;
use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionImportResult, TransactionOrigin}; use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionImportResult, TransactionOrigin};
use miner::work_notify::WorkPoster;
/// Different possible definitions for pending transaction set. /// Different possible definitions for pending transaction set.
#[derive(Debug)] #[derive(Debug)]
@@ -44,29 +47,41 @@ pub enum PendingSet {
 /// Configures the behaviour of the miner.
 #[derive(Debug)]
 pub struct MinerOptions {
+/// URLs to notify when there is new work.
+pub new_work_notify: Vec<String>,
 /// Force the miner to reseal, even when nobody has asked for work.
 pub force_sealing: bool,
 /// Reseal on receipt of new external transactions.
 pub reseal_on_external_tx: bool,
 /// Reseal on receipt of new local transactions.
 pub reseal_on_own_tx: bool,
+/// Minimum period between transaction-inspired reseals.
+pub reseal_min_period: Duration,
 /// Maximum amount of gas to bother considering for block insertion.
 pub tx_gas_limit: U256,
 /// Maximum size of the transaction queue.
 pub tx_queue_size: usize,
 /// Whether we should fallback to providing all the queue's transactions or just pending.
 pub pending_set: PendingSet,
+/// How many historical work packages can we store before running out?
+pub work_queue_size: usize,
+/// Can we submit two different solutions for the same block and expect both to result in an import?
+pub enable_resubmission: bool,
 }
 
 impl Default for MinerOptions {
 fn default() -> Self {
 MinerOptions {
+new_work_notify: vec![],
 force_sealing: false,
 reseal_on_external_tx: true,
 reseal_on_own_tx: true,
 tx_gas_limit: !U256::zero(),
 tx_queue_size: 1024,
 pending_set: PendingSet::AlwaysQueue,
+reseal_min_period: Duration::from_secs(0),
+work_queue_size: 20,
+enable_resubmission: true,
 }
 }
 }
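For reference, a sketch of how the widened MinerOptions might be filled in by a caller; the field names and types come from the struct above, while the literal values are illustrative (the 2000 ms figure mirrors the CLI default documented later in this commit):

use std::time::Duration;

let options = MinerOptions {
    new_work_notify: vec!["http://127.0.0.1:3001".to_owned()], // hypothetical notification endpoint
    force_sealing: false,
    reseal_on_external_tx: true,
    reseal_on_own_tx: true,
    reseal_min_period: Duration::from_millis(2000),
    tx_gas_limit: !U256::zero(),
    tx_queue_size: 1024,
    pending_set: PendingSet::AlwaysQueue,
    work_queue_size: 20,
    enable_resubmission: true,
};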
@@ -80,6 +95,7 @@ pub struct Miner {
 // for sealing...
 options: MinerOptions,
 sealing_enabled: AtomicBool,
+next_allowed_reseal: Mutex<Instant>,
 sealing_block_last_request: Mutex<u64>,
 gas_range_target: RwLock<(U256, U256)>,
 author: RwLock<Address>,
@@ -87,6 +103,7 @@
 spec: Spec,
 accounts: Option<Arc<AccountProvider>>,
+work_poster: Option<WorkPoster>,
 }
 
 impl Miner {
@@ -96,29 +113,34 @@ impl Miner {
 transaction_queue: Mutex::new(TransactionQueue::new()),
 options: Default::default(),
 sealing_enabled: AtomicBool::new(false),
+next_allowed_reseal: Mutex::new(Instant::now()),
 sealing_block_last_request: Mutex::new(0),
-sealing_work: Mutex::new(UsingQueue::new(5)),
+sealing_work: Mutex::new(UsingQueue::new(20)),
 gas_range_target: RwLock::new((U256::zero(), U256::zero())),
 author: RwLock::new(Address::default()),
 extra_data: RwLock::new(Vec::new()),
 accounts: None,
 spec: spec,
+work_poster: None,
 }
 }
 
 /// Creates new instance of miner
 pub fn new(options: MinerOptions, spec: Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
+let work_poster = if !options.new_work_notify.is_empty() { Some(WorkPoster::new(&options.new_work_notify)) } else { None };
 Arc::new(Miner {
 transaction_queue: Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit)),
-sealing_enabled: AtomicBool::new(options.force_sealing),
-options: options,
+sealing_enabled: AtomicBool::new(options.force_sealing || !options.new_work_notify.is_empty()),
+next_allowed_reseal: Mutex::new(Instant::now()),
 sealing_block_last_request: Mutex::new(0),
-sealing_work: Mutex::new(UsingQueue::new(5)),
+sealing_work: Mutex::new(UsingQueue::new(options.work_queue_size)),
 gas_range_target: RwLock::new((U256::zero(), U256::zero())),
 author: RwLock::new(Address::default()),
 extra_data: RwLock::new(Vec::new()),
+options: options,
 accounts: accounts,
 spec: spec,
+work_poster: work_poster,
 })
 }
@@ -126,15 +148,20 @@ impl Miner {
 self.spec.engine.deref()
 }
 
+fn forced_sealing(&self) -> bool {
+self.options.force_sealing || !self.options.new_work_notify.is_empty()
+}
+
 /// Prepares new block for sealing including top transactions from queue.
 #[cfg_attr(feature="dev", allow(match_same_arms))]
 #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 fn prepare_sealing(&self, chain: &MiningBlockChainClient) {
 trace!(target: "miner", "prepare_sealing: entering");
-let (transactions, mut open_block) = {
+let (transactions, mut open_block, last_work_hash) = {
 let transactions = {self.transaction_queue.lock().unwrap().top_transactions()};
 let mut sealing_work = self.sealing_work.lock().unwrap();
+let last_work_hash = sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash());
 let best_hash = chain.best_block_header().sha3();
 /*
 // check to see if last ClosedBlock in would_seals is actually same parent block.
@@ -161,7 +188,7 @@
 )
 }
 };
-(transactions, open_block)
+(transactions, open_block, last_work_hash)
 };
 
 let mut invalid_transactions = HashSet::new();
@@ -227,13 +254,23 @@
 }
 }
-let mut sealing_work = self.sealing_work.lock().unwrap();
-if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
-trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
-sealing_work.push(block);
-}
-trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.peek_last_ref().map(|b| b.block().fields().header.hash()));
+let work = {
+let mut sealing_work = self.sealing_work.lock().unwrap();
+trace!(target: "miner", "Checking whether we need to reseal: last={:?}, this={:?}", last_work_hash, block.block().fields().header.hash());
+let work = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) {
+trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
+let pow_hash = block.block().fields().header.hash();
+let number = block.block().fields().header.number();
+let difficulty = *block.block().fields().header.difficulty();
+sealing_work.push(block);
+Some((pow_hash, difficulty, number))
+} else {
+None
+};
+trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.peek_last_ref().map(|b| b.block().fields().header.hash()));
+work
+};
+work.map(|(pow_hash, difficulty, number)| self.work_poster.as_ref().map(|ref p| p.notify(pow_hash, difficulty, number)));
 }
 
 fn update_gas_limit(&self, chain: &MiningBlockChainClient) {
@@ -261,6 +298,9 @@ impl Miner {
 // Return if
 !have_work
 }
+
+/// Are we allowed to do a non-mandatory reseal?
+fn tx_reseal_allowed(&self) -> bool { Instant::now() > *self.next_allowed_reseal.lock().unwrap() }
 }
 
 const SEALING_TIMEOUT_IN_BLOCKS : u64 = 5;
@@ -431,7 +471,7 @@ impl MinerService for Miner {
 .map(|tx| transaction_queue.add(tx, &fetch_account, TransactionOrigin::External))
 .collect()
 };
-if !results.is_empty() && self.options.reseal_on_external_tx {
+if !results.is_empty() && self.options.reseal_on_external_tx && self.tx_reseal_allowed() {
 self.update_sealing(chain);
 }
 results
@@ -466,7 +506,7 @@
 import
 };
-if imported.is_ok() && self.options.reseal_on_own_tx {
+if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() {
 // Make sure to do it after transaction is imported and lock is dropped.
 // We need to create pending block and enable sealing
 let prepared = self.enable_and_prepare_sealing(chain);
@@ -549,7 +589,7 @@
 let current_no = chain.chain_info().best_block_number;
 let has_local_transactions = self.transaction_queue.lock().unwrap().has_local_pending_transactions();
 let last_request = *self.sealing_block_last_request.lock().unwrap();
-let should_disable_sealing = !self.options.force_sealing
+let should_disable_sealing = !self.forced_sealing()
 && !has_local_transactions
 && current_no > last_request
 && current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS;
@@ -559,6 +599,7 @@ impl MinerService for Miner {
 self.sealing_enabled.store(false, atomic::Ordering::Relaxed);
 self.sealing_work.lock().unwrap().reset();
 } else {
+*self.next_allowed_reseal.lock().unwrap() = Instant::now() + self.options.reseal_min_period;
 self.prepare_sealing(chain);
 }
 }
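The hunk above, together with tx_reseal_allowed() earlier, forms a simple throttle: transaction-triggered reseals are skipped until reseal_min_period has elapsed since the last prepared block. A self-contained sketch of the same idea, using names of our own rather than the Miner internals:

use std::time::{Duration, Instant};

struct ResealThrottle {
    next_allowed: Instant, // counterpart of Miner::next_allowed_reseal
    min_period: Duration,  // counterpart of MinerOptions::reseal_min_period
}

impl ResealThrottle {
    fn reseal_allowed(&self) -> bool {
        Instant::now() > self.next_allowed
    }
    fn mark_resealed(&mut self) {
        self.next_allowed = Instant::now() + self.min_period;
    }
}

fn main() {
    let mut throttle = ResealThrottle { next_allowed: Instant::now(), min_period: Duration::from_millis(2000) };
    if throttle.reseal_allowed() {
        // prepare and push a new pending block here
        throttle.mark_resealed();
    }
}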
@@ -575,26 +616,22 @@
 }
 
 fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
-if let Some(b) = self.sealing_work.lock().unwrap().take_used_if(|b| &b.hash() == &pow_hash) {
-match b.lock().try_seal(self.engine(), seal) {
-Err(_) => {
-info!(target: "miner", "Mined block rejected, PoW was invalid.");
-Err(Error::PowInvalid)
-}
-Ok(sealed) => {
-info!(target: "miner", "New block mined, hash: {}", sealed.header().hash());
-// TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
-let b = sealed.rlp_bytes();
-let h = b.sha3();
-try!(chain.import_block(b));
-info!("Block {} submitted and imported.", h);
-Ok(())
-}
-}
+let result = if let Some(b) = self.sealing_work.lock().unwrap().get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) {
+b.lock().try_seal(self.engine(), seal).or_else(|_| {
+warn!(target: "miner", "Mined solution rejected: Invalid.");
+Err(Error::PowInvalid)
+})
 } else {
-info!(target: "miner", "Mined block rejected, PoW hash invalid or out of date.");
+warn!(target: "miner", "Mined solution rejected: Block unknown or out of date.");
 Err(Error::PowHashInvalid)
-}
+};
+result.and_then(|sealed| {
+let n = sealed.header().number();
+let h = sealed.header().hash();
+try!(chain.import_sealed_block(sealed));
+info!(target: "miner", "Mined block imported OK. #{}: {}", paint(White.bold(), format!("{}", n)), paint(White.bold(), h.hex()));
+Ok(())
+})
 }
 
 fn chain_new_blocks(&self, chain: &MiningBlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) {

View File

@@ -45,6 +45,7 @@
 mod miner;
 mod external;
 mod transaction_queue;
+mod work_notify;
 
 pub use self::transaction_queue::{TransactionQueue, AccountDetails, TransactionImportResult, TransactionOrigin};
 pub use self::miner::{Miner, MinerOptions, PendingSet};

View File

@@ -0,0 +1,115 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate hyper;
use hyper::header::ContentType;
use hyper::method::Method;
use hyper::client::{Request, Response, Client};
use hyper::{Next};
use hyper::net::HttpStream;
use ethash::SeedHashCompute;
use hyper::Url;
use util::*;
use ethereum::ethash::Ethash;
pub struct WorkPoster {
urls: Vec<Url>,
client: Mutex<Client<PostHandler>>,
seed_compute: Mutex<SeedHashCompute>,
}
impl WorkPoster {
pub fn new(urls: &[String]) -> Self {
let urls = urls.into_iter().filter_map(|u| {
match Url::parse(&u) {
Ok(url) => Some(url),
Err(e) => {
warn!("Error parsing URL {} : {}", u, e);
None
}
}
}).collect();
let client = WorkPoster::create_client();
WorkPoster {
client: Mutex::new(client),
urls: urls,
seed_compute: Mutex::new(SeedHashCompute::new()),
}
}
fn create_client() -> Client<PostHandler> {
let client = Client::<PostHandler>::configure()
.keep_alive(true)
.build().expect("Error creating HTTP client") as Client<PostHandler>;
client
}
pub fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) {
// TODO: move this to engine
let target = Ethash::difficulty_to_boundary(&difficulty);
let seed_hash = &self.seed_compute.lock().unwrap().get_seedhash(number);
let seed_hash = H256::from_slice(&seed_hash[..]);
let body = format!(r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#,
pow_hash.hex(), seed_hash.hex(), target.hex(), number);
let mut client = self.client.lock().unwrap();
for u in &self.urls {
if let Err(e) = client.request(u.clone(), PostHandler { body: body.clone() }) {
warn!("Error sending HTTP notification to {} : {}, retrying", u, e);
// TODO: remove this once https://github.com/hyperium/hyper/issues/848 is fixed
*client = WorkPoster::create_client();
if let Err(e) = client.request(u.clone(), PostHandler { body: body.clone() }) {
warn!("Error sending HTTP notification to {} : {}", u, e);
}
}
}
}
}
struct PostHandler {
body: String,
}
impl hyper::client::Handler<HttpStream> for PostHandler {
fn on_request(&mut self, request: &mut Request) -> Next {
request.set_method(Method::Post);
request.headers_mut().set(ContentType::json());
Next::write()
}
fn on_request_writable(&mut self, encoder: &mut hyper::Encoder<HttpStream>) -> Next {
if let Err(e) = encoder.write_all(self.body.as_bytes()) {
trace!("Error posting work data: {}", e);
}
encoder.close();
Next::read()
}
fn on_response(&mut self, _response: Response) -> Next {
Next::end()
}
fn on_response_readable(&mut self, _decoder: &mut hyper::Decoder<HttpStream>) -> Next {
Next::end()
}
fn on_error(&mut self, err: hyper::Error) -> Next {
trace!("Error posting work data: {}", err);
Next::end()
}
}
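For context, the body assembled in notify() above is a JSON object shaped like an eth_getWork result: PoW hash, seed hash, target boundary and block number, all hex-encoded. With placeholder values it looks roughly like:

{ "result": ["0x<pow hash>", "0x<seed hash>", "0x<target>", "0x<block number>"] }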

View File

@@ -17,6 +17,7 @@
 //! Creates and registers client and network services.
 use util::*;
+use util::Colour::{Yellow, White};
 use util::panics::*;
 use spec::Spec;
 use error::*;
@@ -36,6 +37,8 @@ pub enum SyncMessage {
 retracted: Vec<H256>,
 /// Hashes of blocks that are now included in canonical chain
 enacted: Vec<H256>,
+/// Hashes of blocks that are sealed by this node
+sealed: Vec<H256>,
 },
 /// Best Block Hash in chain has been changed
 NewChainHead,
@@ -69,8 +72,7 @@ impl ClientService {
 try!(net_service.start());
 }
 
-info!("Starting {}", net_service.host_info());
-info!("Configured for {} using {:?} engine", spec.name, spec.engine.name());
+info!("Configured for {} using {} engine", paint(White.bold(), spec.name.clone()), paint(Yellow.bold(), spec.engine.name().to_owned()));
 
 let client = try!(Client::new(config, spec, db_path, miner, net_service.io().channel()));
 panic_handler.forward_from(client.deref());
 let client_io = Arc::new(ClientIoHandler {
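A hedged sketch of how a sender might now fill in the extended message; the field set matches the pattern matched in sync/lib.rs later in this commit, and the hash value is a placeholder:

// Illustrative only: `sealed` carries hashes of blocks this node mined itself.
let message = SyncMessage::NewChainBlocks {
    imported: vec![],
    invalid: vec![],
    enacted: vec![block_hash], // hypothetical H256 of the newly imported block
    retracted: vec![],
    sealed: vec![block_hash],  // same hash, flagged as locally sealed
};
// The message is then posted on the service's IO channel (call site not shown here).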

View File

@@ -208,12 +208,17 @@ impl State {
 self.require(a, false).set_storage(key, value)
 }
 
-/// Initialise the code of account `a` so that it is `value` for `key`.
+/// Initialise the code of account `a` so that it is `code`.
 /// NOTE: Account should have been created with `new_contract`.
 pub fn init_code(&mut self, a: &Address, code: Bytes) {
 self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{}).init_code(code);
 }
 
+/// Reset the code of account `a` so that it is `code`.
+pub fn reset_code(&mut self, a: &Address, code: Bytes) {
+self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{}).reset_code(code);
+}
+
 /// Execute a given transaction.
 /// This will change the state accordingly.
 pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, vm_factory: &EvmFactory, t: &SignedTransaction, tracing: bool) -> ApplyResult {
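As the Account-level diff earlier shows, reset_code clears the cached code hash before delegating to init_code, so a contract's code can be replaced and the hash recomputed on the next commit. A hedged, illustrative use at the State level (the address, bytecode and `state` binding are placeholders):

let contract = Address::new();                      // placeholder address
state.init_code(&contract, vec![0x55, 0x44, 0xff]); // initial code, as in the Account test above
state.reset_code(&contract, vec![0x55]);            // replace it; the new hash is computed on commit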

View File

@@ -17,7 +17,7 @@
 use client::{BlockChainClient, Client, ClientConfig};
 use common::*;
 use spec::*;
-use block::{OpenBlock};
+use block::{OpenBlock, Drain};
 use blockchain::{BlockChain, Config as BlockChainConfig};
 use state::*;
 use evm::Schedule;

View File

@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-PARITY_DEB_URL=https://github.com/ethcore/parity/releases/download/v1.2.0/parity_linux_1.2.0-0_amd64.deb
+PARITY_DEB_URL=https://github.com/ethcore/parity/releases/download/v1.2.1/parity_linux_1.2.1-0_amd64.deb
 
 function run_installer()

View File

@@ -137,6 +137,13 @@ Sealing/Mining Options:
 own - reseal only on a new local transaction;
 ext - reseal only on a new external transaction;
 all - reseal on all new transactions [default: all].
+--reseal-min-period MS Specify the minimum time between reseals from
+ incoming transactions. MS is time measured in
+ milliseconds [default: 2000].
+--work-queue-size ITEMS Specify the number of historical work packages
+ which are kept cached lest a solution is found for
+ them later. High values take more memory but result
+ in fewer unusable solutions [default: 20].
 --tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas
 a single transaction may have for it to be mined.
 --relay-set SET Set of transactions to relay. SET may be:
@@ -162,6 +169,12 @@ Sealing/Mining Options:
 more than 32 characters.
 --tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting
 to be included in next block) [default: 1024].
+--remove-solved Move solved blocks from the work package queue
+ instead of cloning them. This gives a slightly
+ faster import speed, but means that extra solutions
+ submitted for the same work package will go unused.
+--notify-work URLS URLs to which work package notifications are pushed.
+ URLS should be a comma-delimited list of HTTP URLs.
 
 Footprint Options:
 --tracing BOOL Indicates if full transaction tracing should be
@@ -302,6 +315,9 @@ pub struct Args {
 pub flag_no_token: bool,
 pub flag_force_sealing: bool,
 pub flag_reseal_on_txs: String,
+pub flag_reseal_min_period: u64,
+pub flag_work_queue_size: usize,
+pub flag_remove_solved: bool,
 pub flag_tx_gas_limit: Option<String>,
 pub flag_relay_set: String,
 pub flag_author: Option<String>,
@@ -311,6 +327,7 @@ pub struct Args {
 pub flag_gas_cap: String,
 pub flag_extra_data: Option<String>,
 pub flag_tx_queue_size: usize,
+pub flag_notify_work: Option<String>,
 pub flag_logging: Option<String>,
 pub flag_version: bool,
 pub flag_from: String,
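Taken together, a node that pushes work packages to external miners and throttles transaction-triggered reseals might be started roughly like this (addresses, URLs and values are illustrative, not recommendations):

parity --author 0x0000000000000000000000000000000000000000 \
  --notify-work http://127.0.0.1:3001,http://127.0.0.1:3002 \
  --reseal-min-period 4000 --work-queue-size 50 --remove-solved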

View File

@ -16,6 +16,7 @@
use std::env; use std::env;
use std::fs::File; use std::fs::File;
use std::time::Duration;
use std::io::{BufRead, BufReader}; use std::io::{BufRead, BufReader};
use std::net::{SocketAddr, IpAddr}; use std::net::{SocketAddr, IpAddr};
use std::path::PathBuf; use std::path::PathBuf;
@ -24,6 +25,7 @@ use docopt::Docopt;
use die::*; use die::*;
use util::*; use util::*;
use util::log::Colour::*;
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;
use util::network_settings::NetworkSettings; use util::network_settings::NetworkSettings;
use ethcore::client::{append_path, get_db_path, ClientConfig, DatabaseCompactionProfile, Switch, VMType}; use ethcore::client::{append_path, get_db_path, ClientConfig, DatabaseCompactionProfile, Switch, VMType};
@ -83,6 +85,10 @@ impl Configuration {
) )
} }
fn work_notify(&self) -> Vec<String> {
self.args.flag_notify_work.as_ref().map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect())
}
pub fn miner_options(&self) -> MinerOptions { pub fn miner_options(&self) -> MinerOptions {
let (own, ext) = match self.args.flag_reseal_on_txs.as_str() { let (own, ext) = match self.args.flag_reseal_on_txs.as_str() {
"none" => (false, false), "none" => (false, false),
@ -92,6 +98,7 @@ impl Configuration {
x => die!("{}: Invalid value for --reseal option. Use --help for more information.", x) x => die!("{}: Invalid value for --reseal option. Use --help for more information.", x)
}; };
MinerOptions { MinerOptions {
new_work_notify: self.work_notify(),
force_sealing: self.args.flag_force_sealing, force_sealing: self.args.flag_force_sealing,
reseal_on_external_tx: ext, reseal_on_external_tx: ext,
reseal_on_own_tx: own, reseal_on_own_tx: own,
@ -103,6 +110,9 @@ impl Configuration {
"lenient" => PendingSet::SealingOrElseQueue, "lenient" => PendingSet::SealingOrElseQueue,
x => die!("{}: Invalid value for --relay-set option. Use --help for more information.", x) x => die!("{}: Invalid value for --relay-set option. Use --help for more information.", x)
}, },
reseal_min_period: Duration::from_millis(self.args.flag_reseal_min_period),
work_queue_size: self.args.flag_work_queue_size,
enable_resubmission: !self.args.flag_remove_solved,
} }
} }
@ -172,7 +182,7 @@ impl Configuration {
let wei_per_usd: f32 = 1.0e18 / usd_per_eth; let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
let gas_per_tx: f32 = 21000.0; let gas_per_tx: f32 = 21000.0;
let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
info!("Using a conversion rate of Ξ1 = US${} ({} wei/gas)", usd_per_eth, wei_per_gas); info!("Using a conversion rate of Ξ1 = {} ({} wei/gas)", paint(White.bold(), format!("US${}", usd_per_eth)), paint(Yellow.bold(), format!("{}", wei_per_gas)));
U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap() U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap()
} }
} }

View File

@@ -80,7 +80,7 @@ use std::thread::sleep;
 use std::time::Duration;
 use rustc_serialize::hex::FromHex;
 use ctrlc::CtrlC;
-use util::{H256, ToPretty, NetworkConfiguration, PayloadInfo, Bytes, UtilError};
+use util::{H256, ToPretty, NetworkConfiguration, PayloadInfo, Bytes, UtilError, paint, Colour, version};
 use util::panics::{MayPanic, ForwardPanic, PanicHandler};
 use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError};
 use ethcore::error::{ImportError};
@@ -184,10 +184,12 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 let panic_handler = PanicHandler::new_in_arc();
 
 // Setup logging
-let logger = setup_log::setup_log(&conf.args.flag_logging);
+let logger = setup_log::setup_log(&conf.args.flag_logging, conf.have_color());
 // Raise fdlimit
 unsafe { ::fdlimit::raise_fd_limit(); }
 
+info!("Starting {}", paint(Colour::White.bold(), format!("{}", version())));
+
 let net_settings = conf.net_settings(&spec);
 let sync_config = conf.sync_config(&spec);
@@ -320,6 +322,8 @@ fn execute_export(conf: Configuration) {
 // Setup panic handler
 let panic_handler = PanicHandler::new_in_arc();
 
+// Setup logging
+let _logger = setup_log::setup_log(&conf.args.flag_logging, conf.have_color());
 // Raise fdlimit
 unsafe { ::fdlimit::raise_fd_limit(); }
@@ -392,6 +396,8 @@ fn execute_import(conf: Configuration) {
 // Setup panic handler
 let panic_handler = PanicHandler::new_in_arc();
 
+// Setup logging
+let _logger = setup_log::setup_log(&conf.args.flag_logging, conf.have_color());
 // Raise fdlimit
 unsafe { ::fdlimit::raise_fd_limit(); }

View File

@@ -19,10 +19,10 @@ use std::env;
 use std::sync::Arc;
 use time;
 use env_logger::LogBuilder;
-use util::{RotatingLogger};
+use util::RotatingLogger;
 
 /// Sets up the logger
-pub fn setup_log(init: &Option<String>) -> Arc<RotatingLogger> {
+pub fn setup_log(init: &Option<String>, enable_color: bool) -> Arc<RotatingLogger> {
 use rlog::*;
 
 let mut levels = String::new();
@@ -43,7 +43,7 @@ pub fn setup_log(init: &Option<String>) -> Arc<RotatingLogger> {
 builder.parse(s);
 }
 
-let logs = Arc::new(RotatingLogger::new(levels));
+let logs = Arc::new(RotatingLogger::new(levels, enable_color));
 let logger = logs.clone();
 let format = move |record: &LogRecord| {
 let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();

View File

@@ -504,7 +504,7 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 let pow_hash = b.hash();
 let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
 let seed_hash = &self.seed_compute.lock().unwrap().get_seedhash(b.block().header().number());
-to_value(&(pow_hash, H256::from_slice(&seed_hash[..]), target))
+to_value(&(pow_hash, H256::from_slice(&seed_hash[..]), target, &U256::from(b.block().header().number())))
 }).unwrap_or(Err(Error::internal_error())) // no work found.
 },
 _ => Err(Error::invalid_params())
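With this change the eth_getWork result gains a fourth element, the current block number, alongside the PoW hash, seed hash and target. An illustrative (made-up) response:

{"jsonrpc":"2.0","id":1,"result":["0x<pow hash>","0x<seed hash>","0x<target>","0x2a"]}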

View File

@@ -17,6 +17,7 @@
 //! rpc integration tests.
 use std::sync::Arc;
 use std::str::FromStr;
+use std::time::Duration;
 
 use ethcore::client::{BlockChainClient, Client, ClientConfig};
 use ethcore::ids::BlockID;
@@ -51,12 +52,16 @@ fn sync_provider() -> Arc<TestSyncProvider> {
 fn miner_service(spec: Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
 Miner::new(
 MinerOptions {
+new_work_notify: vec![],
 force_sealing: true,
 reseal_on_external_tx: true,
 reseal_on_own_tx: true,
 tx_queue_size: 1024,
 tx_gas_limit: !U256::zero(),
 pending_set: PendingSet::SealingOrElseQueue,
+reseal_min_period: Duration::from_secs(0),
+work_queue_size: 50,
+enable_resubmission: true,
 },
 spec,
 Some(accounts)

View File

@@ -32,7 +32,7 @@ fn client_service() -> Arc<TestBlockChainClient> {
 }
 
 fn logger() -> Arc<RotatingLogger> {
-Arc::new(RotatingLogger::new("rpc=trace".to_owned()))
+Arc::new(RotatingLogger::new("rpc=trace".to_owned(), false))
 }
 
 fn settings() -> Arc<NetworkSettings> {

View File

@@ -120,7 +120,7 @@ impl<T: TimeProvider> AuthCodes<T> {
 .filter_map(|f| String::from_utf8(f.to_vec()).ok())
 .collect::<Vec<String>>()
 .join("-");
-info!(target: "signer", "New authentication token generated.");
+trace!(target: "signer", "New authentication token generated.");
 self.codes.push(code);
 Ok(readable_code)
 }

View File

@@ -1231,6 +1231,14 @@ impl ChainSync {
 rlp_stream.out()
 }
 
+/// creates new block rlp for the given client and block hash
+fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes {
+let mut rlp_stream = RlpStream::new_list(2);
+rlp_stream.append_raw(&chain.block(BlockID::Hash(hash.clone())).expect("Block has just been sealed; qed"), 1);
+rlp_stream.append(&chain.block_total_difficulty(BlockID::Hash(hash.clone())).expect("Block has just been sealed; qed."));
+rlp_stream.out()
+}
+
 /// returns peer ids that have less blocks than our chain
 fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec<(PeerId, BlockNumber)> {
 let latest_hash = chain_info.best_block_hash;
@@ -1250,7 +1258,6 @@
 .collect::<Vec<_>>()
 }
 
 fn select_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> Vec<(PeerId, BlockNumber)> {
 use rand::Rng;
 let mut lagging_peers = self.get_lagging_peers(chain_info, io);
@@ -1263,13 +1270,24 @@
 }
 
 /// propagates latest block to lagging peers
-fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> usize {
-let lucky_peers = self.select_lagging_peers(chain_info, io);
+fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256]) -> usize {
+let lucky_peers: Vec<_> = if sealed.is_empty() {
+self.select_lagging_peers(chain_info, io).iter().map(|&(id, _)| id).collect()
+} else {
+self.peers.keys().cloned().collect()
+};
 trace!(target: "sync", "Sending NewBlocks to {:?}", lucky_peers);
 let mut sent = 0;
-for (peer_id, _) in lucky_peers {
-let rlp = ChainSync::create_latest_block_rlp(io.chain());
-self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
+for peer_id in lucky_peers {
+if sealed.is_empty() {
+let rlp = ChainSync::create_latest_block_rlp(io.chain());
+self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
+} else {
+for h in sealed {
+let rlp = ChainSync::create_new_block_rlp(io.chain(), h);
+self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
+}
+}
 self.peers.get_mut(&peer_id).unwrap().latest_hash = chain_info.best_block_hash.clone();
 self.peers.get_mut(&peer_id).unwrap().latest_number = Some(chain_info.best_block_number);
 sent += 1;
@@ -1346,11 +1364,11 @@
 sent
 }
 
-fn propagate_latest_blocks(&mut self, io: &mut SyncIo) {
+fn propagate_latest_blocks(&mut self, io: &mut SyncIo, sealed: &[H256]) {
 let chain_info = io.chain().chain_info();
 if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
 let hashes = self.propagate_new_hashes(&chain_info, io);
-let blocks = self.propagate_blocks(&chain_info, io);
+let blocks = self.propagate_blocks(&chain_info, io, sealed);
 if blocks != 0 || hashes != 0 {
 trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
 }
@@ -1365,10 +1383,10 @@
 }
 
 /// called when block is imported to chain, updates transactions queue and propagates the blocks
-pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) {
+pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], _enacted: &[H256], _retracted: &[H256], sealed: &[H256]) {
 if io.is_chain_queue_empty() {
 // Propagate latest blocks
-self.propagate_latest_blocks(io);
+self.propagate_latest_blocks(io, sealed);
 }
 if !invalid.is_empty() {
 trace!(target: "sync", "Bad blocks in the queue, restarting");
@@ -1637,7 +1655,26 @@ mod tests {
 let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 let chain_info = client.chain_info();
 let mut io = TestIo::new(&mut client, &mut queue, None);
-let peer_count = sync.propagate_blocks(&chain_info, &mut io);
+let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[]);
+// 1 message should be sent
+assert_eq!(1, io.queue.len());
+// 1 peer should be updated
+assert_eq!(1, peer_count);
+// NEW_BLOCK_PACKET
+assert_eq!(0x07, io.queue[0].packet_id);
+}
+
+#[test]
+fn sends_sealed_block() {
+let mut client = TestBlockChainClient::new();
+client.add_blocks(100, EachBlockWith::Uncle);
+let mut queue = VecDeque::new();
+let hash = client.block_hash(BlockID::Number(99)).unwrap();
+let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+let chain_info = client.chain_info();
+let mut io = TestIo::new(&mut client, &mut queue, None);
+let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()]);
 // 1 message should be sent
 assert_eq!(1, io.queue.len());
@@ -1761,7 +1798,7 @@
 let chain_info = client.chain_info();
 let mut io = TestIo::new(&mut client, &mut queue, None);
 
-sync.propagate_blocks(&chain_info, &mut io);
+sync.propagate_blocks(&chain_info, &mut io, &[]);
 
 let data = &io.queue[0].data.clone();
 let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(data));
@@ -1794,7 +1831,7 @@
 let mut queue = VecDeque::new();
 let mut io = TestIo::new(&mut client, &mut queue, None);
 io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks);
-sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks);
+sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);
 assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0);
 assert_eq!(io.chain.miner.status().transactions_in_pending_queue, 1);
 }
@@ -1808,7 +1845,7 @@
 let mut queue = VecDeque::new();
 let mut io = TestIo::new(&mut client, &mut queue, None);
 io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks);
-sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks);
+sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]);
 }
 
 // then
@@ -1833,10 +1870,10 @@
 let mut io = TestIo::new(&mut client, &mut queue, None);
 
 // when
-sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks);
+sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);
 assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0);
 assert_eq!(io.chain.miner.status().transactions_in_pending_queue, 0);
-sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks);
+sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]);
 
 // then
 let status = io.chain.miner.status();
View File

@@ -196,9 +196,9 @@ impl NetworkProtocolHandler<SyncMessage> for EthSync {
#[cfg_attr(feature="dev", allow(single_match))]
fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
match *message {
SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted } => {
SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted, ref sealed } => {
let mut sync_io = NetSyncIo::new(io, self.chain.deref());
self.sync.write().unwrap().chain_new_blocks(&mut sync_io, imported, invalid, enacted, retracted);
self.sync.write().unwrap().chain_new_blocks(&mut sync_io, imported, invalid, enacted, retracted, sealed);
},
_ => {/* Ignore other messages */},
}
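The extra `sealed` list carries the hashes of blocks this node has just sealed locally, so the sync layer can announce them to peers immediately instead of waiting for a reimport. A minimal sketch of a call on this path (illustrative only; `sealed_hash` is a placeholder and the hash type is assumed to be `H256`):

let sealed: Vec<H256> = vec![sealed_hash];
// Only the locally sealed block is new; the reorg-related lists stay empty.
self.sync.write().unwrap().chain_new_blocks(&mut sync_io, &[], &[], &[], &[], &sealed);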


@@ -173,6 +173,6 @@ impl TestNet {
pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
let mut peer = self.peer_mut(peer_id);
peer.sync.write().unwrap().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[]);
peer.sync.write().unwrap().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[], &[]);
}
}


@@ -37,6 +37,7 @@ vergen = "0.1"
target_info = "0.1"
bigint = { path = "bigint" }
chrono = "0.2"
ansi_term = "0.7"
[features]
default = []


@@ -99,7 +99,7 @@ impl DatabaseConfig {
DatabaseConfig {
cache_size: Some(cache_size),
prefix_size: None,
max_open_files: -1,
max_open_files: 256,
compaction: CompactionProfile::default(),
}
}
@@ -122,7 +122,7 @@ impl Default for DatabaseConfig {
DatabaseConfig {
cache_size: None,
prefix_size: None,
max_open_files: -1,
max_open_files: 256,
compaction: CompactionProfile::default(),
}
}
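Both constructors now cap the backing store at 256 open files rather than -1 (no limit), which keeps file-descriptor usage bounded on nodes with many database files. Callers that need a higher ceiling can still set one explicitly; a minimal sketch mirroring the fields shown above (the value is illustrative):

let config = DatabaseConfig {
	cache_size: None,
	prefix_size: None,
	max_open_files: 1024, // raise the new 256-file default for very large databases
	compaction: CompactionProfile::default(),
};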


@@ -117,6 +117,7 @@ extern crate libc;
extern crate target_info;
extern crate bigint;
extern crate chrono;
extern crate ansi_term;
pub mod standard;
#[macro_use]


@@ -20,7 +20,21 @@ use std::env;
use rlog::{LogLevelFilter};
use env_logger::LogBuilder;
use std::sync::{RwLock, RwLockReadGuard};
use std::sync::atomic::{Ordering, AtomicBool};
use arrayvec::ArrayVec;
pub use ansi_term::{Colour, Style};
lazy_static! {
static ref USE_COLOR: AtomicBool = AtomicBool::new(false);
}
/// Paint, using colour if desired.
pub fn paint(c: Style, t: String) -> String {
match USE_COLOR.load(Ordering::Relaxed) {
true => format!("{}", c.paint(t)),
false => t,
}
}
lazy_static! {
static ref LOG_DUMMY: bool = {
@@ -57,7 +71,8 @@ impl RotatingLogger {
/// Creates new `RotatingLogger` with given levels.
/// It does not enforce levels - it's just read only.
pub fn new(levels: String) -> Self {
pub fn new(levels: String, enable_color: bool) -> Self {
USE_COLOR.store(enable_color, Ordering::Relaxed);
RotatingLogger {
levels: levels,
logs: RwLock::new(ArrayVec::<[_; LOG_SIZE]>::new()),
@@ -86,7 +101,7 @@ mod test {
use super::RotatingLogger;
fn logger() -> RotatingLogger {
RotatingLogger::new("test".to_owned())
RotatingLogger::new("test".to_owned(), false)
}
#[test]
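Colour output is opt-in: `RotatingLogger::new` now takes an `enable_color` flag, stores it in the global `USE_COLOR`, and `paint` only emits ANSI styling when that flag is set. A minimal usage sketch (crate paths and the CLI flag are assumptions, not part of this change):

use util::log::{paint, Colour, RotatingLogger};

// Enable colour unless a hypothetical --no-color option was given.
let logger = RotatingLogger::new("sync=debug".to_owned(), !args.flag_no_color);
// Bold white when colour is on, plain text otherwise.
info!("Best block: {}", paint(Colour::White.bold(), format!("#{}", 1_234_567)));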


@@ -32,6 +32,8 @@ use misc::version;
use crypto::*;
use sha3::Hashable;
use rlp::*;
use log::Colour::White;
use log::paint;
use network::session::{Session, SessionData};
use error::*;
use io::*;
@@ -343,6 +345,7 @@ pub struct Host<Message> where Message: Send + Sync + Clone {
reserved_nodes: RwLock<HashSet<NodeId>>,
num_sessions: AtomicUsize,
stopping: AtomicBool,
first_time: AtomicBool,
}
impl<Message> Host<Message> where Message: Send + Sync + Clone {
@@ -398,6 +401,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
reserved_nodes: RwLock::new(HashSet::new()),
num_sessions: AtomicUsize::new(0),
stopping: AtomicBool::new(false),
first_time: AtomicBool::new(true),
};
for n in boot_nodes {
@@ -533,7 +537,11 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
};
self.info.write().unwrap().public_endpoint = Some(public_endpoint.clone());
info!("Public node URL: {}", self.external_url().unwrap());
if self.first_time.load(AtomicOrdering::Relaxed) {
info!("Public node URL: {}", paint(White.bold(), format!("{}", self.external_url().unwrap())));
self.first_time.store(false, AtomicOrdering::Relaxed);
}
// Initialize discovery.
let discovery = {


@@ -88,7 +88,7 @@ impl NetworkProtocolHandler<TestProtocolMessage> for TestProtocol {
/// Timer function called after a timeout created with `NetworkContext::timeout`.
fn timeout(&self, io: &NetworkContext<TestProtocolMessage>, timer: TimerToken) {
io.message(TestProtocolMessage { payload: 22 });
io.message(TestProtocolMessage { payload: 22 }).unwrap();
assert_eq!(timer, 0);
self.got_timeout.store(true, AtomicOrdering::Relaxed);
}
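The added `.unwrap()` suggests `NetworkContext::message` now returns a `Result`; panicking on failure is fine in a test. Outside of tests a caller would more likely log and carry on, roughly (error type assumed):

if let Err(e) = io.message(TestProtocolMessage { payload: 22 }) {
	warn!("Failed to dispatch protocol message: {:?}", e);
}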


@@ -27,6 +27,14 @@ pub struct UsingQueue<T> where T: Clone {
max_size: usize,
}
/// Take an item or just clone it?
pub enum GetAction {
/// Remove the item, faster but you can't get it back.
Take,
/// Clone the item, slower but you can get it again.
Clone,
}
impl<T> UsingQueue<T> where T: Clone {
/// Create a new struct with a maximum size of `max_size`.
pub fn new(max_size: usize) -> UsingQueue<T> {
@@ -74,6 +82,20 @@ impl<T> UsingQueue<T> where T: Clone {
self.in_use.iter().position(|r| predicate(r)).map(|i| self.in_use.remove(i))
}
/// Clones and returns the first in-use item for which `predicate` returns `true`,
/// or `None` if no such item exists in the queue; the item itself stays in the queue.
pub fn clone_used_if<P>(&mut self, predicate: P) -> Option<T> where P: Fn(&T) -> bool {
self.in_use.iter().find(|r| predicate(r)).cloned()
}
/// Dispatches to `take_used_if` or `clone_used_if` depending on `action`.
pub fn get_used_if<P>(&mut self, action: GetAction, predicate: P) -> Option<T> where P: Fn(&T) -> bool {
match action {
GetAction::Take => self.take_used_if(predicate),
GetAction::Clone => self.clone_used_if(predicate),
}
}
/// Returns the most recently pushed block if `f` returns `true` with a reference to it as
/// a parameter, otherwise `None`.
/// Will not destroy a block if a reference to it has previously been returned by `use_last_ref`,
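For orientation, a minimal sketch of how a caller might use the new `GetAction` fork to choose between the destructive and non-destructive lookup (the pending-block scenario and `u32` payload are illustrative):

// Keep the block available for later queries, or consume it once it is final.
fn fetch_pending(q: &mut UsingQueue<u32>, keep: bool) -> Option<u32> {
	let action = if keep { GetAction::Clone } else { GetAction::Take };
	q.get_used_if(action, |b| *b == 42)
}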
@@ -94,18 +116,66 @@ impl<T> UsingQueue<T> where T: Clone {
}
#[test]
fn should_find_when_pushed() {
fn should_not_find_when_pushed() {
let mut q = UsingQueue::new(2);
q.push(1);
assert!(q.take_used_if(|i| i == &1).is_none());
}
#[test]
fn should_not_find_when_pushed_with_clone() {
let mut q = UsingQueue::new(2);
q.push(1);
assert!(q.clone_used_if(|i| i == &1).is_none());
}
#[test]
fn should_find_when_pushed_and_used() {
let mut q = UsingQueue::new(2);
q.push(1);
q.use_last_ref();
assert!(q.take_used_if(|i| i == &1).is_some());
assert!(q.take_used_if(|i| i == &1).unwrap() == 1);
}
#[test]
fn should_have_same_semantics_for_get_take_clone() {
let mut q = UsingQueue::new(2);
q.push(1);
assert!(q.get_used_if(GetAction::Clone, |i| i == &1).is_none());
assert!(q.get_used_if(GetAction::Take, |i| i == &1).is_none());
q.use_last_ref();
assert!(q.get_used_if(GetAction::Clone, |i| i == &1).unwrap() == 1);
assert!(q.get_used_if(GetAction::Clone, |i| i == &1).unwrap() == 1);
assert!(q.get_used_if(GetAction::Take, |i| i == &1).unwrap() == 1);
assert!(q.get_used_if(GetAction::Clone, |i| i == &1).is_none());
assert!(q.get_used_if(GetAction::Take, |i| i == &1).is_none());
}
#[test]
fn should_find_when_pushed_and_used_with_clone() {
let mut q = UsingQueue::new(2);
q.push(1);
q.use_last_ref();
assert!(q.clone_used_if(|i| i == &1).unwrap() == 1);
}
#[test]
fn should_not_find_again_when_pushed_and_taken() {
let mut q = UsingQueue::new(2);
q.push(1);
q.use_last_ref();
assert!(q.take_used_if(|i| i == &1).unwrap() == 1);
assert!(q.clone_used_if(|i| i == &1).is_none());
}
#[test]
fn should_find_again_when_pushed_and_cloned() {
let mut q = UsingQueue::new(2);
q.push(1);
q.use_last_ref();
assert!(q.clone_used_if(|i| i == &1).unwrap() == 1);
assert!(q.clone_used_if(|i| i == &1).unwrap() == 1);
assert!(q.take_used_if(|i| i == &1).unwrap() == 1);
}
#[test]