diff --git a/.travis.yml b/.travis.yml index 7213b8f09..6ae41379e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,11 +14,11 @@ matrix: - rust: nightly include: - rust: stable - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: beta - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: nightly - env: FEATURES="--features travis-nightly" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-nightly" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" cache: apt: true directories: @@ -33,10 +33,6 @@ addons: - libcurl4-openssl-dev - libelf-dev - libdw-dev -before_script: | - sudo add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && - sudo apt-get update && - sudo apt-get install -y --force-yes librocksdb script: - cargo build --release --verbose ${FEATURES} - cargo test --release --verbose ${FEATURES} ${TARGETS} @@ -51,6 +47,7 @@ after_success: | ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethcore-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethsync-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethcore_rpc-* && + ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethminer-* && ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${TRAVIS_JOB_ID} --exclude-pattern /usr/,/.cargo,/root/.multirust target/kcov target/debug/parity-* && [ $TRAVIS_BRANCH = master ] && [ $TRAVIS_PULL_REQUEST = false ] && diff --git a/Cargo.lock b/Cargo.lock index 8e3a4b168..8bf57cb6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ name = "parity" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", @@ -11,6 +11,7 @@ 
dependencies = [ "ethcore-devtools 0.9.99", "ethcore-rpc 0.9.99", "ethcore-util 0.9.99", + "ethminer 0.9.99", "ethsync 0.9.99", "fdlimit 0.1.0", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -94,7 +95,7 @@ dependencies = [ [[package]] name = "clippy" -version = "0.0.49" +version = "0.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -207,7 +208,7 @@ dependencies = [ name = "ethcore" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", @@ -233,10 +234,11 @@ dependencies = [ name = "ethcore-rpc" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", "ethcore 0.9.99", "ethcore-util 0.9.99", + "ethminer 0.9.99", "ethsync 0.9.99", "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -256,7 +258,7 @@ dependencies = [ "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", "chrono 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -271,7 +273,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.2 (git+https://github.com/arkpar/rust-rocksdb.git)", + "rocksdb 0.4.3 (git+https://github.com/arkpar/rust-rocksdb.git)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -285,18 +287,31 @@ dependencies = [ ] [[package]] -name = "ethsync" +name = "ethminer" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 0.9.99", "ethcore-util 0.9.99", + "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ethsync" +version = "0.9.99" +dependencies = [ + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore 0.9.99", + 
"ethcore-util 0.9.99", + "ethminer 0.9.99", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -466,8 +481,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "librocksdb-sys" -version = "0.2.2" -source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" +version = "0.2.3" +source = "git+https://github.com/arkpar/rust-rocksdb.git#ebb602fc74b4067f9f51310bdc0401b8e59b7156" dependencies = [ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -688,11 +703,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rocksdb" -version = "0.4.2" -source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" +version = "0.4.3" +source = "git+https://github.com/arkpar/rust-rocksdb.git#ebb602fc74b4067f9f51310bdc0401b8e59b7156" dependencies = [ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", + "librocksdb-sys 0.2.3 (git+https://github.com/arkpar/rust-rocksdb.git)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 77d1e57ae..351041119 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,18 +19,19 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" number_prefix = "0.2" -clippy = { version = "0.0.49", optional = true } +rpassword = "0.1" +clippy = { version = "0.0.50", optional = true } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethsync = { path = "sync" } +ethminer = { path = "miner" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } -rpassword = "0.1" [features] default = ["rpc"] rpc = ["ethcore-rpc"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev"] travis-beta = ["ethcore/json-tests"] travis-nightly = ["ethcore/json-tests", "dev"] diff --git a/cov.sh b/cov.sh index a1fa29e46..d60ef223d 100755 --- a/cov.sh +++ b/cov.sh @@ -15,12 +15,23 @@ if ! type kcov > /dev/null; then exit 1 fi -cargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --no-run || exit $? +cargo test \ + -p ethash \ + -p ethcore-util \ + -p ethcore \ + -p ethsync \ + -p ethcore-rpc \ + -p parity \ + -p ethminer \ + --no-run || exit $? 
rm -rf target/coverage mkdir -p target/coverage -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethash-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethsync-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-* + +EXCLUDE="~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests" +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethash-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethsync-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethminer-* xdg-open target/coverage/index.html diff --git a/doc.sh b/doc.sh index 2fd5ac20f..a5e5e2e13 100755 --- a/doc.sh +++ b/doc.sh @@ -1,4 +1,11 @@ #!/bin/sh # generate documentation only for partiy and ethcore libraries -cargo doc --no-deps --verbose -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity +cargo doc --no-deps --verbose \ + -p ethash \ + -p ethcore-util \ + -p ethcore \ + -p ethsync \ + -p ethcore-rpc \ + -p parity \ + -p ethminer diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 5ef83842f..be4212f5d 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -17,7 +17,7 @@ ethcore-util = { path = "../util" } evmjit = { path = "../evmjit", optional = true } ethash = { path = "../ethash" } num_cpus = "0.2" -clippy = { version = "0.0.49", optional = true } +clippy = { version = "0.0.50", optional = true } crossbeam = "0.1.5" lazy_static = "0.1" ethcore-devtools = { path = "../devtools" } diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index 026e813f5..f95ec53a1 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -97,6 +97,9 @@ impl<'db> HashDB for AccountDBMut<'db>{ } fn insert(&mut self, value: &[u8]) -> H256 { + if value == &NULL_RLP { + return SHA3_NULL_RLP.clone(); + } let k = value.sha3(); let ak = combine_key(&self.address, &k); self.db.emplace(ak, value.to_vec()); @@ -104,11 +107,17 @@ impl<'db> HashDB for AccountDBMut<'db>{ } fn emplace(&mut self, key: H256, value: Bytes) { + if key == SHA3_NULL_RLP { + return; + } 
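A note on the `AccountDB` hunk above, whose `emplace` body resumes just below: hashing the canonical empty value `NULL_RLP` always yields the constant `SHA3_NULL_RLP`, so the empty value never needs to be written to, or deleted from, the backing database; every empty account can share the implicit null root. A minimal sketch of the pattern, using toy stand-ins (`ToyDB`, `toy_hash`) rather than the real `HashDB`/`sha3`:

```rust
use std::collections::HashMap;

// Stand-ins for NULL_RLP / SHA3_NULL_RLP; illustrative only.
const NULL_VALUE: &'static [u8] = &[0x80];
const NULL_HASH: u64 = 0;

fn toy_hash(value: &[u8]) -> u64 {
    // The real code relies on sha3(NULL_RLP) == SHA3_NULL_RLP being a constant.
    if value == NULL_VALUE { return NULL_HASH; }
    value.iter().fold(17u64, |acc, b| acc.wrapping_mul(31).wrapping_add(*b as u64))
}

struct ToyDB { backing: HashMap<u64, Vec<u8>> }

impl ToyDB {
    fn insert(&mut self, value: &[u8]) -> u64 {
        // The guard from the hunk above: the empty value is never written,
        // because its hash is known without touching the database.
        if value == NULL_VALUE {
            return NULL_HASH;
        }
        let key = toy_hash(value);
        self.backing.insert(key, value.to_vec());
        key
    }
}

fn main() {
    let mut db = ToyDB { backing: HashMap::new() };
    assert_eq!(db.insert(NULL_VALUE), NULL_HASH);
    assert!(db.backing.is_empty()); // nothing hit the backing store
    db.insert(b"account rlp");
    assert_eq!(db.backing.len(), 1);
}
```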
let key = combine_key(&self.address, &key); self.db.emplace(key, value.to_vec()) } fn kill(&mut self, key: &H256) { + if key == &SHA3_NULL_RLP { + return; + } let key = combine_key(&self.address, key); self.db.kill(&key) } diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 50db23dfe..042df1dc1 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -523,7 +523,7 @@ mod tests { let engine = spec.to_engine().unwrap(); let mut config = BlockQueueConfig::default(); config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000 - let mut queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected()); + let queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected()); assert!(!queue.queue_info().is_full()); let mut blocks = get_good_dummy_block_seq(50); for b in blocks.drain(..) { diff --git a/ethcore/src/chainfilter/tests.rs b/ethcore/src/chainfilter/tests.rs index 08af44720..7dac29f11 100644 --- a/ethcore/src/chainfilter/tests.rs +++ b/ethcore/src/chainfilter/tests.rs @@ -28,9 +28,15 @@ pub struct MemoryCache { blooms: HashMap, } +impl Default for MemoryCache { + fn default() -> Self { + MemoryCache::new() + } +} + impl MemoryCache { /// Default constructor for MemoryCache - pub fn new() -> MemoryCache { + pub fn new() -> Self { MemoryCache { blooms: HashMap::new() } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 81e1ef287..95084ffac 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -17,7 +17,6 @@ //! Blockchain database client. use std::marker::PhantomData; -use std::sync::atomic::AtomicBool; use util::*; use util::panics::*; use views::BlockView; @@ -31,12 +30,12 @@ use service::{NetSyncMessage, SyncMessage}; use env_info::LastHashes; use verification::*; use block::*; -use transaction::LocalizedTransaction; +use transaction::{LocalizedTransaction, SignedTransaction}; use extras::TransactionAddress; use filter::Filter; use log_entry::LocalizedLogEntry; use block_queue::{BlockQueue, BlockQueueInfo}; -use blockchain::{BlockChain, BlockProvider, TreeRoute}; +use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use client::{BlockId, TransactionId, ClientConfig, BlockChainClient}; pub use blockchain::CacheSize as BlockChainCacheSize; @@ -106,12 +105,6 @@ pub struct Client where V: Verifier { report: RwLock, import_lock: Mutex<()>, panic_handler: Arc, - - // for sealing... - sealing_enabled: AtomicBool, - sealing_block: Mutex>, - author: RwLock
, - extra_data: RwLock, verifier: PhantomData, } @@ -159,10 +152,6 @@ impl Client where V: Verifier { report: RwLock::new(Default::default()), import_lock: Mutex::new(()), panic_handler: panic_handler, - sealing_enabled: AtomicBool::new(false), - sealing_block: Mutex::new(None), - author: RwLock::new(Address::new()), - extra_data: RwLock::new(Vec::new()), verifier: PhantomData, })) } @@ -233,12 +222,39 @@ impl Client where V: Verifier { Ok(closed_block) } + fn calculate_enacted_retracted(&self, import_results: Vec) -> (Vec, Vec) { + fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { + map.into_iter().map(|(k, _v)| k).collect() + } + + // In ImportRoute we get all the blocks that have been enacted and retracted by single insert. + // Because we are doing multiple inserts some of the blocks that were enacted in import `k` + // could be retracted in import `k+1`. This is why to understand if after all inserts + // the block is enacted or retracted we iterate over all routes and at the end final state + // will be in the hashmap + let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| { + for hash in route.enacted { + map.insert(hash, true); + } + for hash in route.retracted { + map.insert(hash, false); + } + map + }); + + // Split to enacted retracted (using hashmap value) + let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); + // And convert tuples to keys + (map_to_vec(enacted), map_to_vec(retracted)) + } + /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self, io: &IoChannel) -> usize { let max_blocks_to_import = 128; - let mut good_blocks = Vec::with_capacity(max_blocks_to_import); - let mut bad_blocks = HashSet::new(); + let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); + let mut invalid_blocks = HashSet::new(); + let mut import_results = Vec::with_capacity(max_blocks_to_import); let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.drain(max_blocks_to_import); @@ -248,16 +264,16 @@ impl Client where V: Verifier { for block in blocks { let header = &block.header; - if bad_blocks.contains(&header.parent_hash) { - bad_blocks.insert(header.hash()); + if invalid_blocks.contains(&header.parent_hash) { + invalid_blocks.insert(header.hash()); continue; } let closed_block = self.check_and_close_block(&block); if let Err(_) = closed_block { - bad_blocks.insert(header.hash()); + invalid_blocks.insert(header.hash()); break; } - good_blocks.push(header.hash()); + imported_blocks.push(header.hash()); // Are we committing an era? 
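The `calculate_enacted_retracted` helper above rewards a close read: a batch can contain several inserts, so a hash enacted by route `k` may be retracted by route `k+1`, and only the final verdict per hash counts. A self-contained sketch of the same fold-and-partition, with `u64` standing in for `H256` and a pared-down `ImportRoute` (both stand-ins for the real `ethcore` types); the import loop resumes just below this aside.

```rust
use std::collections::HashMap;

struct ImportRoute { enacted: Vec<u64>, retracted: Vec<u64> }

fn enacted_retracted(routes: Vec<ImportRoute>) -> (Vec<u64>, Vec<u64>) {
    // Later routes overwrite earlier ones: the map keeps the final verdict per hash.
    let map = routes.into_iter().fold(HashMap::new(), |mut map, route| {
        for hash in route.enacted { map.insert(hash, true); }
        for hash in route.retracted { map.insert(hash, false); }
        map
    });
    // Split on the boolean verdict, then drop it.
    let (enacted, retracted): (Vec<_>, Vec<_>) = map.into_iter().partition(|&(_, v)| v);
    (enacted.into_iter().map(|(k, _)| k).collect(),
     retracted.into_iter().map(|(k, _)| k).collect())
}

fn main() {
    // Block 1 is enacted by the first insert, then retracted by the second.
    let routes = vec![
        ImportRoute { enacted: vec![1, 2], retracted: vec![] },
        ImportRoute { enacted: vec![3], retracted: vec![1] },
    ];
    let (enacted, mut retracted) = enacted_retracted(routes);
    retracted.sort();
    assert!(enacted.contains(&2) && enacted.contains(&3));
    assert_eq!(retracted, vec![1]);
}
```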
let ancient = if header.number() >= HISTORY { @@ -276,37 +292,41 @@ impl Client where V: Verifier { // And update the chain after commit to prevent race conditions // (when something is in chain but you are not able to fetch details) - self.chain.insert_block(&block.bytes, receipts); + let route = self.chain.insert_block(&block.bytes, receipts); + import_results.push(route); self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } - let imported = good_blocks.len(); - let bad_blocks = bad_blocks.into_iter().collect::>(); + let imported = imported_blocks.len(); + let invalid_blocks = invalid_blocks.into_iter().collect::>(); { - if !bad_blocks.is_empty() { - self.block_queue.mark_as_bad(&bad_blocks); + if !invalid_blocks.is_empty() { + self.block_queue.mark_as_bad(&invalid_blocks); } - if !good_blocks.is_empty() { - self.block_queue.mark_as_good(&good_blocks); + if !imported_blocks.is_empty() { + self.block_queue.mark_as_good(&imported_blocks); } } { - if !good_blocks.is_empty() && self.block_queue.queue_info().is_empty() { + if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { + let (enacted, retracted) = self.calculate_enacted_retracted(import_results); io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { - good: good_blocks, - bad: bad_blocks, - // TODO [todr] were to take those from? - retracted: vec![], + imported: imported_blocks, + invalid: invalid_blocks, + enacted: enacted, + retracted: retracted, })).unwrap(); } } - if self.chain_info().best_block_hash != original_best && self.sealing_enabled.load(atomic::Ordering::Relaxed) { - self.prepare_sealing(); + { + if self.chain_info().best_block_hash != original_best { + io.send(NetworkIoMessage::User(SyncMessage::NewChainHead)).unwrap(); + } } imported @@ -357,52 +377,59 @@ impl Client where V: Verifier { BlockId::Latest => Some(self.chain.best_block_number()) } } - - /// Get the author that we will seal blocks as. - pub fn author(&self) -> Address { - self.author.read().unwrap().clone() - } - - /// Set the author that we will seal blocks as. - pub fn set_author(&self, author: Address) { - *self.author.write().unwrap() = author; - } - - /// Get the extra_data that we will seal blocks wuth. - pub fn extra_data(&self) -> Bytes { - self.extra_data.read().unwrap().clone() - } - - /// Set the extra_data that we will seal blocks with. - pub fn set_extra_data(&self, extra_data: Bytes) { - *self.extra_data.write().unwrap() = extra_data; - } - - /// New chain head event. Restart mining operation. - pub fn prepare_sealing(&self) { - let h = self.chain.best_block_hash(); - let mut b = OpenBlock::new( - self.engine.deref().deref(), - self.state_db.lock().unwrap().spawn(), - match self.chain.block_header(&h) { Some(ref x) => x, None => {return;} }, - self.build_last_hashes(h.clone()), - self.author(), - self.extra_data() - ); - - self.chain.find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); }); - - // TODO: push transactions. - - let b = b.close(); - trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number()); - *self.sealing_block.lock().unwrap() = Some(b); - } } -// TODO: need MinerService MinerIoHandler - impl BlockChainClient for Client where V: Verifier { + + + // TODO [todr] Should be moved to miner crate eventually. 
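With the sealing state gone from `Client`, block import now only broadcasts richer messages: `NewChainBlocks` carries `imported`/`invalid`/`enacted`/`retracted` (the updated `SyncMessage` enum appears in `service.rs` further down), and `NewChainHead` fires whenever the best block changes. Hypothetical glue code on the receiving side (not a hunk from this diff) might forward both straight to the miner; the seal helpers the comment above announces follow.

```rust
extern crate ethcore;
extern crate ethminer;

use ethcore::client::BlockChainClient;
use ethcore::service::SyncMessage;
use ethminer::{Miner, MinerService};

// Hypothetical handler keeping the transaction queue and the sealing block in
// step with the chain; `miner` and `chain` would live in the sync host's state.
fn on_message(miner: &Miner, chain: &BlockChainClient, message: SyncMessage) {
    match message {
        SyncMessage::NewChainBlocks { imported, invalid, enacted, retracted } => {
            miner.chain_new_blocks(chain, &imported, &invalid, &enacted, &retracted);
        }
        // A new best block invalidates the current sealing candidate.
        SyncMessage::NewChainHead => miner.prepare_sealing(chain),
        _ => {}
    }
}
```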
+ fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result { + block.try_seal(self.engine.deref().deref(), seal) + } + + // TODO [todr] Should be moved to miner crate eventually. + fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option { + let engine = self.engine.deref().deref(); + let h = self.chain.best_block_hash(); + + let mut b = OpenBlock::new( + engine, + self.state_db.lock().unwrap().spawn(), + match self.chain.block_header(&h) { Some(ref x) => x, None => {return None} }, + self.build_last_hashes(h.clone()), + author, + extra_data, + ); + + // Add uncles + self.chain + .find_uncle_headers(&h, engine.maximum_uncle_age()) + .unwrap() + .into_iter() + .take(engine.maximum_uncle_count()) + .foreach(|h| { + b.push_uncle(h).unwrap(); + }); + + // Add transactions + let block_number = b.block().header().number(); + for tx in transactions { + let import = b.push_transaction(tx, None); + if let Err(e) = import { + trace!("Error adding transaction to block: number={}. Error: {:?}", block_number, e); + } + } + + // And close + let b = b.close(); + trace!("Sealing: number={}, hash={}, diff={}", + b.block().header().number(), + b.hash(), + b.block().header().difficulty() + ); + Some(b) + } + fn block_header(&self, id: BlockId) -> Option { Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) } @@ -449,6 +476,14 @@ impl BlockChainClient for Client where V: Verifier { self.state().code(address) } + fn balance(&self, address: &Address) -> U256 { + self.state().balance(address) + } + + fn storage_at(&self, address: &Address, position: &H256) -> H256 { + self.state().storage_at(address, position) + } + fn transaction(&self, id: TransactionId) -> Option { match id { TransactionId::Hash(ref hash) => self.chain.transaction_address(hash), @@ -553,39 +588,6 @@ impl BlockChainClient for Client where V: Verifier { }) .collect() } - - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. - fn sealing_block(&self) -> &Mutex> { - if self.sealing_block.lock().unwrap().is_none() { - self.sealing_enabled.store(true, atomic::Ordering::Relaxed); - // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. - self.prepare_sealing(); - } - &self.sealing_block - } - - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { - let mut maybe_b = self.sealing_block.lock().unwrap(); - match *maybe_b { - Some(ref b) if b.hash() == pow_hash => {} - _ => { return Err(Error::PowHashInvalid); } - } - - let b = maybe_b.take(); - match b.unwrap().try_seal(self.engine.deref().deref(), seal) { - Err(old) => { - *maybe_b = Some(old); - Err(Error::PowInvalid) - } - Ok(sealed) => { - // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. 
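`try_seal` above has a deliberate error contract: on failure it hands back the very `ClosedBlock` it was given (`Result<SealedBlock, ClosedBlock>`), so the caller can restore it. `Miner::submit_seal` further down leans on this to `take()` the candidate out of its mutex and put it back when the proof-of-work is bad. The take-and-restore shape in isolation, with toy types (the removed client-side `submit_seal` continues below):

```rust
// Toy stand-ins: sealing either consumes the block or hands it back intact.
struct Closed(u32);
struct Sealed(u32);

fn try_seal(block: Closed, seal_ok: bool) -> Result<Sealed, Closed> {
    if seal_ok { Ok(Sealed(block.0)) } else { Err(block) }
}

fn submit(slot: &mut Option<Closed>, seal_ok: bool) -> Result<Sealed, &'static str> {
    let block = slot.take().ok_or("no block being sealed")?;
    match try_seal(block, seal_ok) {
        // Bad seal: the candidate goes back into the slot for the next attempt.
        Err(old) => { *slot = Some(old); Err("invalid seal") }
        Ok(sealed) => Ok(sealed),
    }
}

fn main() {
    let mut slot = Some(Closed(7));
    assert!(submit(&mut slot, false).is_err());
    assert!(slot.is_some()); // restored after failure
    assert!(submit(&mut slot, true).is_ok());
    assert!(slot.is_none()); // consumed on success
}
```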
- try!(self.import_block(sealed.rlp_bytes())); - Ok(()) - } - } - } } impl MayPanic for Client { diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index afdfb200a..e46d0b570 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -26,18 +26,17 @@ pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig}; pub use self::ids::{BlockId, TransactionId}; pub use self::test_client::{TestBlockChainClient, EachBlockWith}; -use std::sync::Mutex; use util::bytes::Bytes; use util::hash::{Address, H256, H2048}; use util::numbers::U256; use blockchain::TreeRoute; use block_queue::BlockQueueInfo; -use block::ClosedBlock; +use block::{ClosedBlock, SealedBlock}; use header::BlockNumber; -use transaction::LocalizedTransaction; +use transaction::{LocalizedTransaction, SignedTransaction}; use log_entry::LocalizedLogEntry; use filter::Filter; -use error::{ImportResult, Error}; +use error::{ImportResult}; /// Blockchain database client. Owns and manages a blockchain and a block queue. pub trait BlockChainClient : Sync + Send { @@ -66,6 +65,12 @@ pub trait BlockChainClient : Sync + Send { /// Get address code. fn code(&self, address: &Address) -> Option; + /// Get address balance. + fn balance(&self, address: &Address) -> U256; + + /// Get value of the storage at given position. + fn storage_at(&self, address: &Address, position: &H256) -> H256; + /// Get transaction with given hash. fn transaction(&self, id: TransactionId) -> Option; @@ -103,11 +108,13 @@ pub trait BlockChainClient : Sync + Send { /// Returns logs matching given filter. fn logs(&self, filter: Filter) -> Vec; - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. - fn sealing_block(&self) -> &Mutex>; + // TODO [todr] Should be moved to miner crate eventually. + /// Returns ClosedBlock prepared for sealing. + fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option; + + // TODO [todr] Should be moved to miner crate eventually. + /// Attempts to seal given block. Returns `SealedBlock` on success and the same block in case of error. + fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result; - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error>; } diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 76e13fe9f..e8d956bb6 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -17,7 +17,7 @@ //! Test client. use util::*; -use transaction::{Transaction, LocalizedTransaction, Action}; +use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action}; use blockchain::TreeRoute; use client::{BlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId}; use header::{Header as BlockHeader, BlockNumber}; @@ -25,9 +25,10 @@ use filter::Filter; use log_entry::LocalizedLogEntry; use receipt::Receipt; use extras::BlockReceipts; -use error::{ImportResult, Error}; +use error::{ImportResult}; + use block_queue::BlockQueueInfo; -use block::ClosedBlock; +use block::{SealedBlock, ClosedBlock}; /// Test client. pub struct TestBlockChainClient { @@ -41,6 +42,12 @@ pub struct TestBlockChainClient { pub last_hash: RwLock, /// Difficulty. pub difficulty: RwLock, + /// Balances. + pub balances: RwLock>, + /// Storage. + pub storage: RwLock>, + /// Code. 
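The `TestBlockChainClient` hunk above (its matching `code` map field follows just below) gives tests an in-memory stand-in for real chain state: `set_balance`, `set_code` and `set_storage` stock the maps behind the `balance`, `code` and `storage_at` accessors that `BlockChainClient` now requires. A hypothetical use in a test:

```rust
extern crate ethcore;
extern crate ethcore_util as util;

use ethcore::client::{TestBlockChainClient, BlockChainClient};
use util::hash::Address;
use util::numbers::{Uint, U256};

fn main() {
    let mut client = TestBlockChainClient::new();
    let who = Address::new(); // the zero address works as a probe account
    assert_eq!(client.balance(&who), U256::zero()); // unset balance reads as zero

    client.set_balance(who.clone(), U256::from(5_000_000u64));
    assert_eq!(client.balance(&who), U256::from(5_000_000u64));
    assert_eq!(client.code(&who), None); // code was never set, so stays None
}
```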
+ pub code: RwLock>, } #[derive(Clone)] @@ -56,9 +63,15 @@ pub enum EachBlockWith { UncleAndTransaction } +impl Default for TestBlockChainClient { + fn default() -> Self { + TestBlockChainClient::new() + } +} + impl TestBlockChainClient { /// Creates new test client. - pub fn new() -> TestBlockChainClient { + pub fn new() -> Self { let mut client = TestBlockChainClient { blocks: RwLock::new(HashMap::new()), @@ -66,12 +79,30 @@ impl TestBlockChainClient { genesis_hash: H256::new(), last_hash: RwLock::new(H256::new()), difficulty: RwLock::new(From::from(0)), + balances: RwLock::new(HashMap::new()), + storage: RwLock::new(HashMap::new()), + code: RwLock::new(HashMap::new()), }; client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().unwrap().clone(); client } + /// Set the balance of account `address` to `balance`. + pub fn set_balance(&mut self, address: Address, balance: U256) { + self.balances.write().unwrap().insert(address, balance); + } + + /// Set `code` at `address`. + pub fn set_code(&mut self, address: Address, code: Bytes) { + self.code.write().unwrap().insert(address, code); + } + + /// Set storage `position` to `value` for account `address`. + pub fn set_storage(&mut self, address: Address, position: H256, value: H256) { + self.storage.write().unwrap().insert((address, position), value); + } + /// Add blocks to test client. pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { let len = self.numbers.read().unwrap().len(); @@ -162,8 +193,16 @@ impl BlockChainClient for TestBlockChainClient { U256::zero() } - fn code(&self, _address: &Address) -> Option { - unimplemented!(); + fn code(&self, address: &Address) -> Option { + self.code.read().unwrap().get(address).cloned() + } + + fn balance(&self, address: &Address) -> U256 { + self.balances.read().unwrap().get(address).cloned().unwrap_or_else(U256::zero) + } + + fn storage_at(&self, address: &Address, position: &H256) -> H256 { + self.storage.read().unwrap().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new) } fn transaction(&self, _id: TransactionId) -> Option { @@ -178,12 +217,12 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn sealing_block(&self) -> &Mutex> { - unimplemented!(); + fn prepare_sealing(&self, _author: Address, _extra_data: Bytes, _transactions: Vec) -> Option { + unimplemented!() } - fn submit_seal(&self, _pow_hash: H256, _seal: Vec) -> Result<(), Error> { - unimplemented!(); + fn try_seal(&self, _block: ClosedBlock, _seal: Vec) -> Result { + unimplemented!() } fn block_header(&self, id: BlockId) -> Option { diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 824d8da90..72127c754 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -63,8 +63,15 @@ pub enum ExecutionError { } #[derive(Debug)] -/// Errors concerning transaction proessing. +/// Errors concerning transaction processing. pub enum TransactionError { + /// Transaction's gas price is below threshold. + InsufficientGasPrice { + /// Minimal expected gas price + minimal: U256, + /// Transaction gas price + got: U256 + }, /// Transaction's gas limit (aka gas) is invalid. 
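`InsufficientGasPrice` is a struct variant, so callers get both the threshold and the offending price rather than a bare flag (the enum's pre-existing `InvalidGasLimit` variant closes it just below). A hypothetical caller-side match, assuming only that `Error` wraps `TransactionError` the way the queue code later in this diff constructs it:

```rust
extern crate ethcore;
extern crate ethcore_util as util;

use ethcore::error::{Error, TransactionError};
use util::numbers::U256;

// Hypothetical handling: the named fields let an RPC or CLI layer report
// exactly how far below the threshold the offered price was.
fn explain(err: &Error) -> Option<String> {
    match *err {
        Error::Transaction(TransactionError::InsufficientGasPrice { minimal, got }) =>
            Some(format!("gas price {} below the required minimum {}", got, minimal)),
        _ => None,
    }
}

fn main() {
    let err = Error::Transaction(TransactionError::InsufficientGasPrice {
        minimal: U256::from(20u64),
        got: U256::from(1u64),
    });
    println!("{}", explain(&err).unwrap());
}
```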
InvalidGasLimit(OutOfBounds), } diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 598921580..d37bc20fb 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -301,8 +301,14 @@ mod tests { env_info: EnvInfo } + impl Default for TestSetup { + fn default() -> Self { + TestSetup::new() + } + } + impl TestSetup { - fn new() -> TestSetup { + fn new() -> Self { TestSetup { state: get_temp_state(), engine: get_test_spec().to_engine().unwrap(), diff --git a/ethcore/src/log_entry.rs b/ethcore/src/log_entry.rs index a75e6fcc1..63d09b4f0 100644 --- a/ethcore/src/log_entry.rs +++ b/ethcore/src/log_entry.rs @@ -111,7 +111,7 @@ mod tests { let bloom = H2048::from_str("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let log = LogEntry { - address: address, + address: address, topics: vec![], data: vec![] }; diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 6daf0d7b6..bcfe7724f 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -28,12 +28,16 @@ pub enum SyncMessage { /// New block has been imported into the blockchain NewChainBlocks { /// Hashes of blocks imported to blockchain - good: Vec, - /// Hashes of blocks not imported to blockchain - bad: Vec, + imported: Vec, + /// Hashes of blocks not imported to blockchain (because were invalid) + invalid: Vec, /// Hashes of blocks that were removed from canonical chain retracted: Vec, + /// Hashes of blocks that are now included in cannonical chain + enacted: Vec, }, + /// Best Block Hash in chain has been changed + NewChainHead, /// A block is ready BlockVerified, } diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 43b426560..174580d20 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -143,16 +143,9 @@ fn can_mine() { let dummy_blocks = get_good_dummy_block_seq(2); let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]); let client = client_result.reference(); - let b = client.sealing_block(); - let pow_hash = { - let u = b.lock().unwrap(); - match *u { - Some(ref b) => { - assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3()); - b.hash() - } - None => { panic!(); } - } - }; - assert!(client.submit_seal(pow_hash, vec![]).is_ok()); + + let b = client.prepare_sealing(Address::default(), vec![], vec![]).unwrap(); + + assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3()); + assert!(client.try_seal(b, vec![]).is_ok()); } diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs index ae2a153fe..20c15c3f1 100644 --- a/ethcore/src/verification/noop_verifier.rs +++ b/ethcore/src/verification/noop_verifier.rs @@ -20,6 +20,7 @@ use error::Error; use header::Header; use super::Verifier; +#[allow(dead_code)] pub struct NoopVerifier; impl Verifier for NoopVerifier { diff --git a/ethcore/src/verification/verification.rs 
b/ethcore/src/verification/verification.rs index ed3db3791..60cbed56c 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -255,8 +255,14 @@ mod tests { numbers: HashMap, } + impl Default for TestBlockChain { + fn default() -> Self { + TestBlockChain::new() + } + } + impl TestBlockChain { - pub fn new() -> TestBlockChain { + pub fn new() -> Self { TestBlockChain { blocks: HashMap::new(), numbers: HashMap::new(), diff --git a/hook.sh b/hook.sh index 9780541fe..58bff20ab 100755 --- a/hook.sh +++ b/hook.sh @@ -7,6 +7,6 @@ echo "set -e" >> $FILE echo "cargo build --release --features dev" >> $FILE # Build tests echo "cargo test --no-run --features dev \\" >> $FILE -echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" >> $FILE +echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" >> $FILE echo "" >> $FILE chmod +x $FILE diff --git a/miner/Cargo.toml b/miner/Cargo.toml new file mode 100644 index 000000000..b450ece73 --- /dev/null +++ b/miner/Cargo.toml @@ -0,0 +1,24 @@ +[package] +description = "Ethminer library" +homepage = "http://ethcore.io" +license = "GPL-3.0" +name = "ethminer" +version = "0.9.99" +authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" + +[dependencies] +ethcore-util = { path = "../util" } +ethcore = { path = "../ethcore" } +log = "0.3" +env_logger = "0.3" +rustc-serialize = "0.3" +rayon = "0.3.1" +clippy = { version = "0.0.50", optional = true } + +[features] +default = [] +dev = ["clippy"] diff --git a/miner/build.rs b/miner/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/miner/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/miner/src/lib.rs b/miner/src/lib.rs new file mode 100644 index 000000000..a431bd44e --- /dev/null +++ b/miner/src/lib.rs @@ -0,0 +1,111 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +#![warn(missing_docs)] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] + +//! Miner module +//! Keeps track of transactions and mined block. +//! +//! Usage example: +//! +//! ```rust +//! extern crate ethcore_util as util; +//! extern crate ethcore; +//! extern crate ethminer; +//! use std::ops::Deref; +//! use std::env; +//! use std::sync::Arc; +//! use util::network::{NetworkService, NetworkConfiguration}; +//! use ethcore::client::{Client, ClientConfig, BlockChainClient}; +//! use ethcore::ethereum; +//! use ethminer::{Miner, MinerService}; +//! +//! fn main() { +//! let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap(); +//! let dir = env::temp_dir(); +//! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); +//! +//! let miner: Miner = Miner::default(); +//! // get status +//! assert_eq!(miner.status().transaction_queue_pending, 0); +//! +//! // Check block for sealing +//! miner.prepare_sealing(client.deref()); +//! assert!(miner.sealing_block(client.deref()).lock().unwrap().is_some()); +//! } +//! ``` + + +#[macro_use] +extern crate log; +#[macro_use] +extern crate ethcore_util as util; +extern crate ethcore; +extern crate env_logger; +extern crate rayon; + +mod miner; +mod transaction_queue; + +pub use transaction_queue::TransactionQueue; +pub use miner::{Miner}; + +use std::sync::Mutex; +use util::{H256, U256, Address, Bytes}; +use ethcore::client::{BlockChainClient}; +use ethcore::block::{ClosedBlock}; +use ethcore::error::{Error}; +use ethcore::transaction::SignedTransaction; + +/// Miner client API +pub trait MinerService : Send + Sync { + + /// Returns miner's status. + fn status(&self) -> MinerStatus; + + /// Imports transactions to transaction queue. + fn import_transactions(&self, transactions: Vec, fetch_nonce: T) -> Result<(), Error> + where T: Fn(&Address) -> U256; + + /// Returns hashes of transactions currently in pending + fn pending_transactions_hashes(&self) -> Vec; + + /// Removes all transactions from the queue and restart mining operation. + fn clear_and_reset(&self, chain: &BlockChainClient); + + /// Called when blocks are imported to chain, updates transactions queue. + fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]); + + /// New chain head event. Restart mining operation. + fn prepare_sealing(&self, chain: &BlockChainClient); + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex>; + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error>; +} + +/// Mining status +pub struct MinerStatus { + /// Number of transactions in queue with state `pending` (ready to be included in block) + pub transaction_queue_pending: usize, + /// Number of transactions in queue with state `future` (not yet ready to be included in block) + pub transaction_queue_future: usize, +} diff --git a/miner/src/miner.rs b/miner/src/miner.rs new file mode 100644 index 000000000..ad403150d --- /dev/null +++ b/miner/src/miner.rs @@ -0,0 +1,191 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
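That is the crate's whole public surface: the `MinerService` trait, the `Miner` implementation (whose source begins above and continues below), and the re-exported `TransactionQueue`. Note the `fetch_nonce` parameter: the queue never queries the chain itself; callers pass a closure. A hypothetical relay path wiring the two together:

```rust
extern crate ethcore;
extern crate ethminer;

use ethcore::client::BlockChainClient;
use ethcore::transaction::SignedTransaction;
use ethminer::{Miner, MinerService};

// Hypothetical relay path: transactions arriving from the network go into the
// queue; the closure lets the queue look up each sender's current nonce
// without the miner ever holding a reference to the chain.
fn relay(miner: &Miner, chain: &BlockChainClient, txs: Vec<SignedTransaction>) {
    if let Err(e) = miner.import_transactions(txs, |a| chain.nonce(a)) {
        // e.g. InsufficientGasPrice, an old nonce, or a bad signature.
        println!("transaction rejected: {:?}", e);
    }
}
```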
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +use rayon::prelude::*; +use std::sync::{Mutex, RwLock, Arc}; +use std::sync::atomic; +use std::sync::atomic::AtomicBool; + +use util::{H256, U256, Address, Bytes}; +use ethcore::views::{BlockView}; +use ethcore::client::{BlockChainClient, BlockId}; +use ethcore::block::{ClosedBlock}; +use ethcore::error::{Error}; +use ethcore::transaction::SignedTransaction; +use super::{MinerService, MinerStatus, TransactionQueue}; + +/// Keeps track of transactions using a priority queue and holds the currently mined block. +pub struct Miner { + transaction_queue: Mutex<TransactionQueue>, + + // for sealing... + sealing_enabled: AtomicBool, + sealing_block: Mutex<Option<ClosedBlock>>, + author: RwLock<Address>, + extra_data: RwLock<Bytes>, +} + +impl Default for Miner { + fn default() -> Miner { + Miner { + transaction_queue: Mutex::new(TransactionQueue::new()), + sealing_enabled: AtomicBool::new(false), + sealing_block: Mutex::new(None), + author: RwLock::new(Address::default()), + extra_data: RwLock::new(Vec::new()), + } + } +} + +impl Miner { + /// Creates a new instance of the miner. + pub fn new() -> Arc<Miner> { + Arc::new(Miner::default()) + } + + /// Get the author that we will seal blocks as. + fn author(&self) -> Address { + *self.author.read().unwrap() + } + + /// Get the extra_data that we will seal blocks with. + fn extra_data(&self) -> Bytes { + self.extra_data.read().unwrap().clone() + } + + /// Set the author that we will seal blocks as. + pub fn set_author(&self, author: Address) { + *self.author.write().unwrap() = author; + } + + /// Set the extra_data that we will seal blocks with. + pub fn set_extra_data(&self, extra_data: Bytes) { + *self.extra_data.write().unwrap() = extra_data; + } + + /// Set the minimal gas price of a transaction to be accepted for mining. + pub fn set_minimal_gas_price(&self, min_gas_price: U256) { + self.transaction_queue.lock().unwrap().set_minimal_gas_price(min_gas_price); + } +} + +impl MinerService for Miner { + + fn clear_and_reset(&self, chain: &BlockChainClient) { + self.transaction_queue.lock().unwrap().clear(); + self.prepare_sealing(chain); + } + + fn status(&self) -> MinerStatus { + let status = self.transaction_queue.lock().unwrap().status(); + MinerStatus { + transaction_queue_pending: status.pending, + transaction_queue_future: status.future, + } + } + + fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_nonce: T) -> Result<(), Error> + where T: Fn(&Address) -> U256 { + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.add_all(transactions, fetch_nonce) + } + + fn pending_transactions_hashes(&self) -> Vec<H256> { + let transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.pending_hashes() + } + + fn prepare_sealing(&self, chain: &BlockChainClient) { + let no_of_transactions = 128; + let transactions = self.transaction_queue.lock().unwrap().top_transactions(no_of_transactions); + + let b = chain.prepare_sealing( + self.author(), + self.extra_data(), + transactions, + ); + *self.sealing_block.lock().unwrap() = b; + } + + fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex<Option<ClosedBlock>> { + if self.sealing_block.lock().unwrap().is_none() { + self.sealing_enabled.store(true, atomic::Ordering::Relaxed); + // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. + self.prepare_sealing(chain); + } + &self.sealing_block + } + + fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> { + let mut maybe_b = self.sealing_block.lock().unwrap(); + match *maybe_b { + Some(ref b) if b.hash() == pow_hash => {} + _ => { return Err(Error::PowHashInvalid); } + } + + let b = maybe_b.take(); + match chain.try_seal(b.unwrap(), seal) { + Err(old) => { + *maybe_b = Some(old); + Err(Error::PowInvalid) + } + Ok(sealed) => { + // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
+ try!(chain.import_block(sealed.rlp_bytes())); + Ok(()) + } + } + } + + fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) { + fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { + let block = chain + .block(BlockId::Hash(*hash)) + // Client should send message after commit to db and inserting to chain. + .expect("Expected in-chain blocks."); + let block = BlockView::new(&block); + block.transactions() + } + + { + let in_chain = vec![imported, enacted, invalid]; + let in_chain = in_chain + .par_iter() + .flat_map(|h| h.par_iter().map(|h| fetch_transactions(chain, h))); + let out_of_chain = retracted + .par_iter() + .map(|h| fetch_transactions(chain, h)); + + in_chain.for_each(|txs| { + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); + transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); + }); + out_of_chain.for_each(|txs| { + // populate sender + for tx in &txs { + let _sender = tx.sender(); + } + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let _ = transaction_queue.add_all(txs, |a| chain.nonce(a)); + }); + } + + if self.sealing_enabled.load(atomic::Ordering::Relaxed) { + self.prepare_sealing(chain); + } + } +} diff --git a/sync/src/transaction_queue.rs b/miner/src/transaction_queue.rs similarity index 91% rename from sync/src/transaction_queue.rs rename to miner/src/transaction_queue.rs index 0f9ef19c8..880c73750 100644 --- a/sync/src/transaction_queue.rs +++ b/miner/src/transaction_queue.rs @@ -28,13 +28,13 @@ //! ```rust //! extern crate ethcore_util as util; //! extern crate ethcore; -//! extern crate ethsync; +//! extern crate ethminer; //! extern crate rustc_serialize; //! //! use util::crypto::KeyPair; //! use util::hash::Address; //! use util::numbers::{Uint, U256}; -//! use ethsync::TransactionQueue; +//! use ethminer::TransactionQueue; //! use ethcore::transaction::*; //! use rustc_serialize::hex::FromHex; //! @@ -86,7 +86,7 @@ use util::numbers::{Uint, U256}; use util::hash::{Address, H256}; use util::table::*; use ethcore::transaction::*; -use ethcore::error::Error; +use ethcore::error::{Error, TransactionError}; #[derive(Clone, Debug)] @@ -245,6 +245,8 @@ pub struct TransactionQueueStatus { /// TransactionQueue implementation pub struct TransactionQueue { + /// Gas Price threshold for transactions that can be imported to this queue (defaults to 0) + minimal_gas_price: U256, /// Priority queue for transactions that can go to block current: TransactionSet, /// Priority queue for transactions that has been received but are not yet valid to go to block @@ -281,6 +283,7 @@ impl TransactionQueue { }; TransactionQueue { + minimal_gas_price: U256::zero(), current: current, future: future, by_hash: HashMap::new(), @@ -288,6 +291,12 @@ impl TransactionQueue { } } + /// Sets new gas price threshold for incoming transactions. + /// Any transactions already imported to the queue are not affected. 
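Stepping back to `chain_new_blocks` in `miner.rs` above (the `set_minimal_gas_price` setter that this doc comment introduces follows right below): the maintenance splits blocks into two groups. Transactions from anything now settled in the chain leave the queue, while transactions from retracted blocks are still valid and are re-queued. Stripped of the rayon parallelism, the shape is roughly this sketch (signatures inferred from the usage in the hunk):

```rust
extern crate ethcore;
extern crate ethcore_util as util;
extern crate ethminer;

use ethcore::client::{BlockChainClient, BlockId};
use ethcore::views::BlockView;
use ethcore::transaction::SignedTransaction;
use ethminer::TransactionQueue;
use util::hash::H256;

// The inner helper from the hunk: blocks named in the message are expected
// to be in the database already.
fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
    let block = chain.block(BlockId::Hash(*hash)).expect("in-chain block");
    BlockView::new(&block).transactions()
}

// Sequential sketch of the queue maintenance; the real code fans the block
// lists out in parallel with rayon's par_iter.
fn update_queue(queue: &mut TransactionQueue, chain: &BlockChainClient,
                in_chain: &[H256], retracted: &[H256]) {
    for hash in in_chain {
        // Mined (or otherwise settled) transactions leave the queue for good.
        let hashes: Vec<H256> = fetch_transactions(chain, hash).iter().map(|tx| tx.hash()).collect();
        queue.remove_all(&hashes, |a| chain.nonce(a));
    }
    for hash in retracted {
        // Transactions from abandoned blocks are still valid: re-queue them.
        let _ = queue.add_all(fetch_transactions(chain, hash), |a| chain.nonce(a));
    }
}
```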
+ pub fn set_minimal_gas_price(&mut self, min_gas_price: U256) { + self.minimal_gas_price = min_gas_price; + } + // Will be used when rpc merged #[allow(dead_code)] /// Returns current status for this queue @@ -310,6 +319,19 @@ impl TransactionQueue { /// Add signed transaction to queue to be verified and imported pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) -> Result<(), Error> where T: Fn(&Address) -> U256 { + + if tx.gas_price < self.minimal_gas_price { + trace!(target: "sync", + "Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})", + tx.hash(), tx.gas_price, self.minimal_gas_price + ); + + return Err(Error::Transaction(TransactionError::InsufficientGasPrice{ + minimal: self.minimal_gas_price, + got: tx.gas_price + })); + } + self.import_tx(try!(VerifiedTransaction::new(tx)), fetch_nonce); Ok(()) } @@ -346,7 +368,7 @@ impl TransactionQueue { self.update_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current - self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender, current_nonce, current_nonce); return; } @@ -362,7 +384,7 @@ impl TransactionQueue { self.move_all_to_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current. It should also update last_nonces. - self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender, current_nonce, current_nonce); return; } } @@ -377,7 +399,7 @@ impl TransactionQueue { for k in all_nonces_from_sender { let order = self.future.drop(&sender, &k).unwrap(); if k >= current_nonce { - self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { // Remove the transaction completely self.by_hash.remove(&order.hash); @@ -397,7 +419,7 @@ impl TransactionQueue { // Goes to future or is removed let order = self.current.drop(&sender, &k).unwrap(); if k >= current_nonce { - self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { self.by_hash.remove(&order.hash); } @@ -417,6 +439,14 @@ impl TransactionQueue { .collect() } + /// Returns hashes of all transactions from current, ordered by priority. 
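(The `pending_hashes` accessor itself follows.) Worth spelling out the `current`/`future` split these methods juggle: a transaction whose nonce is ahead of the sender's expected next nonce parks in `future`, and once the gap is filled `move_matching_future_to_current` promotes the whole chain of waiting transactions. In queue-API terms, with hypothetical values mirroring the crate's doctest setup:

```rust
extern crate ethcore_util as util;
extern crate ethcore;
extern crate ethminer;
extern crate rustc_serialize;

use util::crypto::KeyPair;
use util::hash::Address;
use util::numbers::{Uint, U256};
use ethcore::transaction::*;
use ethminer::TransactionQueue;
use rustc_serialize::hex::FromHex;

fn main() {
    let key = KeyPair::create().unwrap();
    // Transactions from one sender; the payload values are arbitrary.
    let make = |nonce: u64| Transaction {
        action: Action::Create,
        value: U256::from(100),
        data: "3331600055".from_hex().unwrap(),
        gas: U256::from(100_000),
        gas_price: U256::one(),
        nonce: U256::from(nonce),
    }.sign(&key.secret());

    let mut txq = TransactionQueue::new();
    let nonce = |_a: &Address| U256::zero(); // the chain's expected next nonce

    txq.add(make(1), &nonce).unwrap();       // nonce 1 > expected 0: parked in future
    assert_eq!(txq.status().future, 1);
    assert_eq!(txq.status().pending, 0);

    txq.add(make(0), &nonce).unwrap();       // gap filled: both promoted to current
    assert_eq!(txq.status().pending, 2);
    assert_eq!(txq.status().future, 0);
}
```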
+ pub fn pending_hashes(&self) -> Vec { + self.current.by_priority + .iter() + .map(|t| t.hash) + .collect() + } + /// Removes all elements (in any state) from the queue pub fn clear(&mut self) { self.current.clear(); @@ -438,8 +468,8 @@ impl TransactionQueue { // remove also from priority and hash self.future.by_priority.remove(&order); // Put to current - let order = order.update_height(current_nonce.clone(), first_nonce); - self.current.insert(address.clone(), current_nonce, order); + let order = order.update_height(current_nonce, first_nonce); + self.current.insert(address, current_nonce, order); current_nonce = current_nonce + U256::one(); } } @@ -487,10 +517,10 @@ impl TransactionQueue { } let base_nonce = fetch_nonce(&address); - Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); - self.last_nonces.insert(address.clone(), nonce); + Self::replace_transaction(tx, base_nonce, &mut self.current, &mut self.by_hash); + self.last_nonces.insert(address, nonce); // But maybe there are some more items waiting in future? - self.move_matching_future_to_current(address.clone(), nonce + U256::one(), base_nonce); + self.move_matching_future_to_current(address, nonce + U256::one(), base_nonce); self.current.enforce_limit(&mut self.by_hash); } @@ -504,7 +534,7 @@ impl TransactionQueue { let address = tx.sender(); let nonce = tx.nonce(); - by_hash.insert(hash.clone(), tx); + by_hash.insert(hash, tx); if let Some(old) = set.insert(address, nonce, order.clone()) { // There was already transaction in queue. Let's check which one should stay let old_fee = old.gas_price; @@ -620,6 +650,22 @@ mod test { assert_eq!(stats.pending, 1); } + #[test] + fn should_not_import_transaction_below_min_gas_price_threshold() { + // given + let mut txq = TransactionQueue::new(); + let tx = new_tx(); + txq.set_minimal_gas_price(tx.gas_price + U256::one()); + + // when + txq.add(tx, &default_nonce).unwrap_err(); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 0); + assert_eq!(stats.future, 0); + } + #[test] fn should_reject_incorectly_signed_transaction() { // given @@ -663,6 +709,24 @@ mod test { assert_eq!(top.len(), 2); } + #[test] + fn should_return_pending_hashes() { + // given + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(1)); + + // when + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + + // then + let top = txq.pending_hashes(); + assert_eq!(top[0], tx.hash()); + assert_eq!(top[1], tx2.hash()); + assert_eq!(top.len(), 2); + } + #[test] fn should_put_transaction_to_futures_if_gap_detected() { // given @@ -831,7 +895,7 @@ mod test { fn should_drop_transactions_with_old_nonces() { let mut txq = TransactionQueue::new(); let tx = new_tx(); - let last_nonce = tx.nonce.clone() + U256::one(); + let last_nonce = tx.nonce + U256::one(); let fetch_last_nonce = |_a: &Address| last_nonce; // when diff --git a/parity/main.rs b/parity/main.rs index bbcb17bae..020a74bcb 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -24,6 +24,7 @@ extern crate rustc_serialize; extern crate ethcore_util as util; extern crate ethcore; extern crate ethsync; +extern crate ethminer; #[macro_use] extern crate log as rlog; extern crate env_logger; @@ -50,6 +51,7 @@ use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethsync::{EthSync, SyncConfig, SyncProvider}; +use ethminer::{Miner, MinerService}; use docopt::Docopt; use daemonize::Daemonize; use 
number_prefix::{binary_prefix, Standalone, Prefixed}; @@ -76,70 +78,86 @@ Usage: parity [options] Protocol Options: - --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file - or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. - --testnet Equivalent to --chain testnet (geth-compatible). - --networkid INDEX Override the network identifier from the chain we are on. - --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive, - light (experimental), fast (experimental) [default: archive]. - -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] - --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] - --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] + --chain CHAIN Specify the blockchain type. CHAIN may be either a + JSON chain specification file or olympic, frontier, + homestead, mainnet, morden, or testnet + [default: homestead]. + -d --db-path PATH Specify the database & configuration directory path + [default: $HOME/.parity]. + --keys-path PATH Specify the path for JSON key files to be found + [default: $HOME/.web3/keys]. --identity NAME Specify your node's name. Networking Options: - --port PORT Override the port on which the node should listen [default: 30303]. + --port PORT Override the port on which the node should listen + [default: 30303]. --peers NUM Try to maintain that many peers [default: 25]. - --nat METHOD Specify method to use for determining public address. Must be one of: any, none, - upnp, extip:(IP) [default: any]. - --bootnodes NODES Specify additional comma-separated bootnodes. - --no-bootstrap Don't bother trying to connect to standard bootnodes. + --nat METHOD Specify method to use for determining public + address. Must be one of: any, none, upnp, + extip: [default: any]. + --network-id INDEX Override the network identifier from the chain we + are on. + --bootnodes NODES Override the bootnodes from our chain. NODES should + be comma-delimited enodes. --no-discovery Disable new peer discovery. - --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation. + --node-key KEY Specify node secret key, either as 64-character hex + string or input to SHA3 operation. API and Console Options: -j --jsonrpc Enable the JSON-RPC API sever. - --jsonrpc-addr HOST Specify the hostname portion of the JSONRPC API server [default: 127.0.0.1]. - --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545]. - --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. - --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited - list of API name. Possible name are web3, eth and net. [default: web3,eth,net,personal]. - - --rpc Equivalent to --jsonrpc (geth-compatible). - --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). - --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). - --rpcapi APIS Equivalent to --jsonrpc-apis APIS (geth-compatible). - --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible). + --jsonrpc-addr HOST Specify the hostname portion of the JSONRPC API + server [default: 127.0.0.1]. + --jsonrpc-port PORT Specify the port portion of the JSONRPC API server + [default: 8545]. + --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses + [default: null]. 
+ --jsonrpc-apis APIS Specify the APIs available through the JSONRPC + interface. APIS is a comma-delimited list of API + names. Possible names are web3, eth and net. + [default: web3,eth,net,personal]. Sealing/Mining Options: - --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards - from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. - --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters. + --gas-price WEI Minimum amount of Wei to be paid for a transaction + to be accepted for mining [default: 20000000000]. + --author ADDRESS Specify the block author (aka "coinbase") address + for sending block rewards from sealed blocks + [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. + --extra-data STRING Specify a custom extra-data for authored blocks, no + more than 32 characters. -Memory Footprint Options: - --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384]. - --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144]. - --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800]. - --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with - other cache options (geth-compatible). +Footprint Options: + --pruning METHOD Configure pruning of the state/storage trie. METHOD + may be one of: archive, basic (experimental), fast + (experimental) [default: archive]. + --cache-pref-size BYTES Specify the preferred size of the blockchain cache in + bytes [default: 16384]. + --cache-max-size BYTES Specify the maximum size of the blockchain cache in + bytes [default: 262144]. + --queue-max-size BYTES Specify the maximum size of memory to use for block + queue [default: 52428800]. + --cache MEGABYTES Set total amount of discretionary memory to use for + the entire system, overrides other cache and queue + options. -Geth-Compatibility Options +Geth-compatibility Options: --datadir PATH Equivalent to --db-path PATH. --testnet Equivalent to --chain testnet. - --networkid INDEX Override the network identifier from the chain we are on. + --networkid INDEX Equivalent to --network-id INDEX. + --maxpeers COUNT Equivalent to --peers COUNT. + --nodekey KEY Equivalent to --node-key KEY. + --nodiscover Equivalent to --no-discovery. --rpc Equivalent to --jsonrpc. --rpcaddr HOST Equivalent to --jsonrpc-addr HOST. --rpcport PORT Equivalent to --jsonrpc-port PORT. --rpcapi APIS Equivalent to --jsonrpc-apis APIS. --rpccorsdomain URL Equivalent to --jsonrpc-cors URL. - --maxpeers COUNT Equivalent to --peers COUNT. - --nodekey KEY Equivalent to --node-key KEY. - --nodiscover Equivalent to --no-discovery. + --gasprice WEI Equivalent to --gas-price WEI. --etherbase ADDRESS Equivalent to --author ADDRESS. --extradata STRING Equivalent to --extra-data STRING. Miscellaneous Options: - -l --logging LOGGING Specify the logging level. + -l --logging LOGGING Specify the logging level. Must conform to the same + format as RUST_LOG. -v --version Show information about version. -h --help Show this screen. 
"#; @@ -157,8 +175,8 @@ struct Args { flag_cache: Option, flag_keys_path: String, flag_bootnodes: Option, + flag_network_id: Option, flag_pruning: String, - flag_no_bootstrap: bool, flag_port: u16, flag_peers: usize, flag_no_discovery: bool, @@ -172,17 +190,19 @@ struct Args { flag_jsonrpc_port: u16, flag_jsonrpc_cors: String, flag_jsonrpc_apis: String, + flag_author: String, + flag_gas_price: String, + flag_extra_data: Option, flag_logging: Option, flag_version: bool, // geth-compatibility... flag_nodekey: Option, flag_nodiscover: bool, flag_maxpeers: Option, - flag_author: String, - flag_extra_data: Option, flag_datadir: Option, flag_extradata: Option, flag_etherbase: Option, + flag_gasprice: Option, flag_rpc: bool, flag_rpcaddr: Option, flag_rpcport: Option, @@ -219,7 +239,15 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { +fn setup_rpc_server( + client: Arc, + sync: Arc, + secret_store: Arc, + miner: Arc, + url: &str, + cors_domain: &str, + apis: Vec<&str> +) -> Option> { use rpc::v1::*; let server = rpc::RpcServer::new(); @@ -228,8 +256,8 @@ fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc server.add_delegate(Web3Client::new().to_delegate()), "net" => server.add_delegate(NetClient::new(&sync).to_delegate()), "eth" => { - server.add_delegate(EthClient::new(&client, &sync, &secret_store).to_delegate()); - server.add_delegate(EthFilterClient::new(&client).to_delegate()); + server.add_delegate(EthClient::new(&client, &sync, &secret_store, &miner).to_delegate()); + server.add_delegate(EthFilterClient::new(&client, &miner).to_delegate()); } "personal" => server.add_delegate(PersonalClient::new(&secret_store).to_delegate()), _ => { @@ -241,7 +269,15 @@ fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc, _sync: Arc, _url: &str) -> Option> { +fn setup_rpc_server( + _client: Arc, + _sync: Arc, + _secret_store: Arc, + _miner: Arc, + _url: &str, + _cors_domain: &str, + _apis: Vec<&str> +) -> Option> { None } @@ -276,7 +312,16 @@ impl Configuration { fn author(&self) -> Address { let d = self.args.flag_etherbase.as_ref().unwrap_or(&self.args.flag_author); - Address::from_str(d).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) + Address::from_str(d).unwrap_or_else(|_| { + die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", d) + }) + } + + fn gas_price(&self) -> U256 { + let d = self.args.flag_gasprice.as_ref().unwrap_or(&self.args.flag_gas_price); + U256::from_dec_str(d).unwrap_or_else(|_| { + die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d) + }) } fn extra_data(&self) -> Bytes { @@ -299,7 +344,9 @@ impl Configuration { "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(), "morden" | "testnet" => ethereum::new_morden(), "olympic" => ethereum::new_olympic(), - f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| die!("{}: Couldn't read chain specification file. Sure it exists?", f)).as_ref()), + f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| { + die!("{}: Couldn't read chain specification file. 
Sure it exists?", f) + }).as_ref()), } } @@ -312,11 +359,15 @@ impl Configuration { } fn init_nodes(&self, spec: &Spec) -> Vec<String> { - let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() }; - if let Some(ref x) = self.args.flag_bootnodes { - r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s)))); + match self.args.flag_bootnodes { + Some(ref x) if x.len() > 0 => x.split(',').map(|s| { + Self::normalize_enode(s).unwrap_or_else(|| { + die!("{}: Invalid node address format given for a boot node.", s) + }) + }).collect(), + Some(_) => Vec::new(), + None => spec.nodes().clone(), } - r } #[cfg_attr(feature="dev", allow(useless_format))] @@ -327,7 +378,7 @@ impl Configuration { let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host)); Some(SocketAddr::new(host, self.args.flag_port)) } else { - listen_address.clone() + listen_address }; (listen_address, public_address) } @@ -348,6 +399,38 @@ impl Configuration { ret } + fn client_config(&self) -> ClientConfig { + let mut client_config = ClientConfig::default(); + match self.args.flag_cache { + Some(mb) => { + client_config.blockchain.max_cache_size = mb * 1024 * 1024; + client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size * 3 / 4; + } + None => { + client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; + client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; + } + } + client_config.pruning = match self.args.flag_pruning.as_str() { + "archive" => journaldb::Algorithm::Archive, + "light" => journaldb::Algorithm::EarlyMerge, + "fast" => journaldb::Algorithm::OverlayRecent, + "basic" => journaldb::Algorithm::RefCounted, + _ => { die!("Invalid pruning method given."); } + }; + client_config.name = self.args.flag_identity.clone(); + client_config.queue.max_mem_use = self.args.flag_queue_max_size; + client_config + } + + fn sync_config(&self, spec: &Spec) -> SyncConfig { + let mut sync_config = SyncConfig::default(); + sync_config.network_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()).map_or(spec.network_id(), |id| { + U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --network-id/--networkid", id)) + }); + sync_config + } + fn execute(&self) { if self.args.flag_version { print_version(); @@ -388,12 +471,13 @@ impl Configuration { } if self.args.cmd_list { println!("Known addresses:"); - for &(addr, _) in secret_store.accounts().unwrap().iter() { + for &(addr, _) in &secret_store.accounts().unwrap() { println!("{:?}", addr); } } } + #[cfg_attr(feature="dev", allow(useless_format))] fn execute_client(&self) { // Setup panic handler let panic_handler = PanicHandler::new_in_arc(); @@ -405,39 +489,21 @@ impl Configuration { let spec = self.spec(); let net_settings = self.net_settings(&spec); - let mut sync_config = SyncConfig::default(); - sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id))).unwrap_or(spec.network_id()); + let sync_config = self.sync_config(&spec); // Build client - let mut client_config = ClientConfig::default(); - match self.args.flag_cache { - Some(mb) => { - client_config.blockchain.max_cache_size = mb * 1024 * 1024; - client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2; - } - None => { - 
client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; - client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; - } - } - client_config.pruning = match self.args.flag_pruning.as_str() { - "" => journaldb::Algorithm::Archive, - "archive" => journaldb::Algorithm::Archive, - "pruned" => journaldb::Algorithm::EarlyMerge, - "fast" => journaldb::Algorithm::OverlayRecent, -// "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged. - _ => { die!("Invalid pruning method given."); } - }; - client_config.name = self.args.flag_identity.clone(); - client_config.queue.max_mem_use = self.args.flag_queue_max_size; - let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); + let mut service = ClientService::start(self.client_config(), spec, net_settings, &Path::new(&self.path())).unwrap(); panic_handler.forward_from(&service); - let client = service.client().clone(); - client.set_author(self.author()); - client.set_extra_data(self.extra_data()); + let client = service.client(); + + // Miner + let miner = Miner::new(); + miner.set_author(self.author()); + miner.set_extra_data(self.extra_data()); + miner.set_minimal_gas_price(self.gas_price()); // Sync - let sync = EthSync::register(service.network(), sync_config, client); + let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone()); // Secret Store let account_service = Arc::new(AccountService::new()); @@ -452,11 +518,18 @@ impl Configuration { let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); - let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(",").collect()); + let server_handler = setup_rpc_server( + service.client(), + sync.clone(), + account_service.clone(), + miner.clone(), + &url, + cors, + apis.split(',').collect() + ); if let Some(handler) = server_handler { panic_handler.forward_from(handler.deref()); } - } // Register IO handler @@ -464,6 +537,7 @@ impl Configuration { client: service.client(), info: Default::default(), sync: sync.clone(), + accounts: account_service.clone(), }); service.io().register_handler(io_handler).expect("Error registering IO handler"); @@ -526,7 +600,11 @@ impl Informant { let report = client.report(); let sync_info = sync.status(); - if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { + if let (_, _, &Some(ref last_report)) = ( + self.chain_info.read().unwrap().deref(), + self.cache_info.read().unwrap().deref(), + self.report.read().unwrap().deref() + ) { println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, chain_info.best_block_hash, @@ -555,20 +633,28 @@ impl Informant { const INFO_TIMER: TimerToken = 0; +const ACCOUNT_TICK_TIMER: TimerToken = 10; +const ACCOUNT_TICK_MS: u64 = 60000; + struct ClientIoHandler { client: Arc<Client>, sync: Arc<EthSync>, + accounts: Arc<AccountService>, info: Informant, } impl IoHandler<NetSyncMessage> for ClientIoHandler { fn initialize(&self, io: &IoContext<NetSyncMessage>) { io.register_timer(INFO_TIMER, 5000).expect("Error registering timer"); io.register_timer(ACCOUNT_TICK_TIMER, ACCOUNT_TICK_MS).expect("Error registering 
account timer"); + } fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) { - if INFO_TIMER == timer { - self.info.tick(&self.client, &self.sync); + match timer { + INFO_TIMER => { self.info.tick(&self.client, &self.sync); } + ACCOUNT_TICK_TIMER => { self.accounts.tick(); }, + _ => {} } } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index a1f154ca8..fa89041d8 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,10 +18,11 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } -clippy = { version = "0.0.49", optional = true } +ethminer = { path = "../miner" } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } +clippy = { version = "0.0.50", optional = true } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } @@ -30,4 +31,4 @@ syntex = "0.29.0" [features] default = ["serde_codegen"] nightly = ["serde_macros"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethminer/dev"] diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 731ded8c4..3096a45c9 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(feature="nightly", feature(custom_derive, custom_attribute, plugin))] #![cfg_attr(feature="nightly", plugin(serde_macros, clippy))] +#[macro_use] +extern crate log; extern crate rustc_serialize; extern crate serde; extern crate serde_json; @@ -27,6 +29,7 @@ extern crate jsonrpc_http_server; extern crate ethcore_util as util; extern crate ethcore; extern crate ethsync; +extern crate ethminer; extern crate transient_hashmap; use std::sync::Arc; diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index 465290270..f9ed6230c 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -1,10 +1,13 @@ //! Helper type with all filter possibilities. +use util::hash::H256; use ethcore::filter::Filter; +pub type BlockNumber = u64; + #[derive(Clone)] pub enum PollFilter { - Block, - PendingTransaction, - Logs(Filter) + Block(BlockNumber), + PendingTransaction(Vec<H256>), + Logs(BlockNumber, Filter) } diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index 0297384d1..9735d7d5d 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -22,28 +22,13 @@ use transient_hashmap::{TransientHashMap, Timer, StandardTimer}; const POLL_LIFETIME: u64 = 60; pub type PollId = usize; -pub type BlockNumber = u64; - -pub struct PollInfo<F> { - pub filter: F, - pub block_number: BlockNumber } - -impl<F> Clone for PollInfo<F> where F: Clone { - fn clone(&self) -> Self { - PollInfo { - filter: self.filter.clone(), - block_number: self.block_number.clone() - } - } -} /// Indexes all poll requests. /// /// Lazily garbage collects unused polls info. pub struct PollManager<F, T = StandardTimer> where T: Timer { - polls: TransientHashMap<PollId, PollInfo<F>, T>, - next_available_id: PollId + polls: TransientHashMap<PollId, F, T>, + next_available_id: PollId, } impl<F> PollManager<F, StandardTimer> { @@ -54,6 +39,7 @@ } impl<F, T> PollManager<F, T> where T: Timer { + pub fn new_with_timer(timer: T) -> Self { PollManager { polls: TransientHashMap::new_with_timer(POLL_LIFETIME, timer), @@ -64,31 +50,30 @@ impl<F, T> PollManager<F, T> where T: Timer { /// Returns id which can be used for new poll. /// /// Stores information when last poll happened. 
- pub fn create_poll(&mut self, filter: F, block: BlockNumber) -> PollId { + pub fn create_poll(&mut self, filter: F) -> PollId { self.polls.prune(); + let id = self.next_available_id; + self.polls.insert(id, filter); + self.next_available_id += 1; - self.polls.insert(id, PollInfo { - filter: filter, - block_number: block, - }); id } - /// Updates information when last poll happend. - pub fn update_poll(&mut self, id: &PollId, block: BlockNumber) { - self.polls.prune(); - if let Some(info) = self.polls.get_mut(id) { - info.block_number = block; - } - } - - /// Returns number of block when last poll happend. - pub fn poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { + // Implementation is always using `poll_mut` + #[cfg(test)] + /// Get a reference to stored poll filter + pub fn poll(&mut self, id: &PollId) -> Option<&F> { self.polls.prune(); self.polls.get(id) } + /// Get a mutable reference to stored poll filter + pub fn poll_mut(&mut self, id: &PollId) -> Option<&mut F> { + self.polls.prune(); + self.polls.get_mut(id) + } + /// Removes poll info. pub fn remove_poll(&mut self, id: &PollId) { self.polls.remove(id); @@ -97,48 +82,46 @@ impl<F, T> PollManager<F, T> where T: Timer { #[cfg(test)] mod tests { - use std::cell::RefCell; + use std::cell::Cell; use transient_hashmap::Timer; use v1::helpers::PollManager; struct TestTimer<'a> { - time: &'a RefCell<i64>, + time: &'a Cell<i64>, } impl<'a> Timer for TestTimer<'a> { fn get_time(&self) -> i64 { - *self.time.borrow() + self.time.get() } } #[test] fn test_poll_indexer() { - let time = RefCell::new(0); + let time = Cell::new(0); let timer = TestTimer { time: &time, }; let mut indexer = PollManager::new_with_timer(timer); - assert_eq!(indexer.create_poll(false, 20), 0); - assert_eq!(indexer.create_poll(true, 20), 1); + assert_eq!(indexer.create_poll(20), 0); + assert_eq!(indexer.create_poll(20), 1); - *time.borrow_mut() = 10; - indexer.update_poll(&0, 21); - assert_eq!(indexer.poll_info(&0).unwrap().filter, false); - assert_eq!(indexer.poll_info(&0).unwrap().block_number, 21); + time.set(10); + *indexer.poll_mut(&0).unwrap() = 21; + assert_eq!(*indexer.poll(&0).unwrap(), 21); + assert_eq!(*indexer.poll(&1).unwrap(), 20); - *time.borrow_mut() = 30; - indexer.update_poll(&1, 23); - assert_eq!(indexer.poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); + time.set(30); + *indexer.poll_mut(&1).unwrap() = 23; + assert_eq!(*indexer.poll(&1).unwrap(), 23); - *time.borrow_mut() = 75; - indexer.update_poll(&0, 30); - assert!(indexer.poll_info(&0).is_none()); - assert_eq!(indexer.poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); + time.set(75); + assert!(indexer.poll(&0).is_none()); + assert_eq!(*indexer.poll(&1).unwrap(), 23); indexer.remove_poll(&1); - assert!(indexer.poll_info(&1).is_none()); + assert!(indexer.poll(&1).is_none()); } + } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 38e363624..0e8b8d863 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -15,9 +15,11 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Eth rpc implementation. 
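The reworked PollManager above drops the `update_poll`/`poll_info` pair in favour of direct mutable access, so each stored filter now carries its own progress state. A minimal usage sketch under that API (assuming the `PollManager::new()` constructor shown in the hunk; this snippet is illustrative and not part of the diff):

    // Each filter owns its last-seen state; callers mutate it in place.
    let mut polls: PollManager<PollFilter> = PollManager::new();
    let id = polls.create_poll(PollFilter::Block(0));
    if let Some(&mut PollFilter::Block(ref mut last_block)) = polls.poll_mut(&id) {
        *last_block = 42; // replaces the old update_poll(&id, 42) call
    }
    polls.remove_poll(&id);
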
-use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::{Arc, Weak, Mutex, RwLock}; +use std::ops::Deref; use ethsync::{SyncProvider, SyncState}; +use ethminer::{MinerService}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; @@ -34,19 +36,29 @@ use v1::helpers::{PollFilter, PollManager}; use util::keys::store::AccountProvider; /// Eth rpc implementation. -pub struct EthClient<C, S, A> where C: BlockChainClient, S: SyncProvider, A: AccountProvider { +pub struct EthClient<C, S, A, M> + where C: BlockChainClient, + S: SyncProvider, + A: AccountProvider, + M: MinerService { client: Weak<C>, sync: Weak<S>, accounts: Weak<A>, + miner: Weak<M>, hashrates: RwLock<HashMap<H256, U256>>, } -impl<C, S, A> EthClient<C, S, A> where C: BlockChainClient, S: SyncProvider, A: AccountProvider { +impl<C, S, A, M> EthClient<C, S, A, M> + where C: BlockChainClient, + S: SyncProvider, + A: AccountProvider, + M: MinerService { /// Creates new EthClient. - pub fn new(client: &Arc<C>, sync: &Arc<S>, accounts: &Arc<A>) -> Self { + pub fn new(client: &Arc<C>, sync: &Arc<S>, accounts: &Arc<A>, miner: &Arc<M>) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), + miner: Arc::downgrade(miner), accounts: Arc::downgrade(accounts), hashrates: RwLock::new(HashMap::new()), } @@ -98,10 +110,15 @@ impl<C, S, A> EthClient<C, S, A> where C: BlockChainClient, S: SyncProvider, A: } } -impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: SyncProvider + 'static, A: AccountProvider + 'static { +impl<C, S, A, M> Eth for EthClient<C, S, A, M> + where C: BlockChainClient + 'static, + S: SyncProvider + 'static, + A: AccountProvider + 'static, + M: MinerService + 'static { + fn protocol_version(&self, params: Params) -> Result<Value, Error> { match params { - Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), + Params::None => Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())), _ => Err(Error::invalid_params()) } } @@ -155,6 +172,14 @@ impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: } } + fn accounts(&self, _: Params) -> Result<Value, Error> { + let store = take_weak!(self.accounts); + match store.accounts() { + Ok(account_list) => to_value(&account_list), + Err(_) => Err(Error::internal_error()) + } + } + fn block_number(&self, params: Params) -> Result<Value, Error> { match params { Params::None => to_value(&U256::from(take_weak!(self.client).chain_info().best_block_number)), @@ -162,37 +187,59 @@ impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: } } + fn balance(&self, params: Params) -> Result<Value, Error> { + from_params::<(Address, BlockNumber)>(params) + .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).balance(&address))) + } + + fn storage_at(&self, params: Params) -> Result<Value, Error> { + from_params::<(Address, U256, BlockNumber)>(params) + .and_then(|(address, position, _block_number)| + to_value(&U256::from(take_weak!(self.client).storage_at(&address, &H256::from(position))))) + } + + fn transaction_count(&self, params: Params) -> Result<Value, Error> { + from_params::<(Address, BlockNumber)>(params) + .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).nonce(&address))) + } + fn block_transaction_count_by_hash(&self, params: Params) -> Result<Value, Error> { from_params::<(H256,)>(params) - .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { - Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), - None => Ok(Value::Null) - }) + .and_then(|(hash,)| // match + to_value(&take_weak!(self.client).block(BlockId::Hash(hash)) + .map_or_else(U256::zero, |bytes| 
U256::from(BlockView::new(&bytes).transactions_count())))) } fn block_transaction_count_by_number(&self, params: Params) -> Result<Value, Error> { from_params::<(BlockNumber,)>(params) .and_then(|(block_number,)| match block_number { - BlockNumber::Pending => to_value(&take_weak!(self.sync).status().transaction_queue_pending), - _ => match take_weak!(self.client).block(block_number.into()) { - Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), - None => Ok(Value::Null) - } + BlockNumber::Pending => to_value(&U256::from(take_weak!(self.miner).status().transaction_queue_pending)), + _ => to_value(&take_weak!(self.client).block(block_number.into()) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).transactions_count()))) }) } - fn block_uncles_count(&self, params: Params) -> Result<Value, Error> { + fn block_uncles_count_by_hash(&self, params: Params) -> Result<Value, Error> { from_params::<(H256,)>(params) - .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { - Some(bytes) => to_value(&BlockView::new(&bytes).uncles_count()), - None => Ok(Value::Null) + .and_then(|(hash,)| + to_value(&take_weak!(self.client).block(BlockId::Hash(hash)) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).uncles_count())))) + } + + fn block_uncles_count_by_number(&self, params: Params) -> Result<Value, Error> { + from_params::<(BlockNumber,)>(params) + .and_then(|(block_number,)| match block_number { + BlockNumber::Pending => to_value(&U256::from(0)), + _ => to_value(&take_weak!(self.client).block(block_number.into()) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).uncles_count()))) }) } // TODO: do not ignore block number param fn code_at(&self, params: Params) -> Result<Value, Error> { from_params::<(Address, BlockNumber)>(params) - .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new))) + .and_then(|(address, _block_number)| + to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new))) } fn block_by_hash(&self, params: Params) -> Result<Value, Error> { @@ -220,6 +267,13 @@ impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: .and_then(|(number, index)| self.transaction(TransactionId::Location(number.into(), index.value()))) } + fn compilers(&self, params: Params) -> Result<Value, Error> { + match params { + Params::None => to_value(&vec![] as &Vec<String>), + _ => Err(Error::invalid_params()) + } + } + fn logs(&self, params: Params) -> Result<Value, Error> { from_params::<(Filter,)>(params) .and_then(|(filter,)| { @@ -234,8 +288,9 @@ impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: fn work(&self, params: Params) -> Result<Value, Error> { match params { Params::None => { - let c = take_weak!(self.client); - let u = c.sealing_block().lock().unwrap(); + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); + let u = miner.sealing_block(client.deref()).lock().unwrap(); match *u { Some(ref b) => { let pow_hash = b.hash(); @@ -253,9 +308,10 @@ impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: fn submit_work(&self, params: Params) -> Result<Value, Error> { from_params::<(H64, H256, H256)>(params).and_then(|(nonce, pow_hash, mix_hash)| { // trace!("Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); - let c = take_weak!(self.client); + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); let seal = vec![encode(&mix_hash).to_vec(), encode(&nonce).to_vec()]; - let r = c.submit_seal(pow_hash, seal); + let r = miner.submit_seal(client.deref(), pow_hash, seal); 
to_value(&r.is_ok()) }) } @@ -274,12 +330,21 @@ impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: let accounts = take_weak!(self.accounts); match accounts.account_secret(&transaction_request.from) { Ok(secret) => { - let sync = take_weak!(self.sync); + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); + let transaction: EthTransaction = transaction_request.into(); let signed_transaction = transaction.sign(&secret); let hash = signed_transaction.hash(); - sync.insert_transaction(signed_transaction); - to_value(&hash) + + let import = miner.import_transactions(vec![signed_transaction], |a: &Address| client.nonce(a)); + match import { + Ok(_) => to_value(&hash), + Err(e) => { + warn!("Error sending transaction: {:?}", e); + to_value(&U256::zero()) + } + } }, Err(_) => { to_value(&U256::zero()) } } @@ -288,27 +353,39 @@ } /// Eth filter rpc implementation. -pub struct EthFilterClient<C> where C: BlockChainClient { +pub struct EthFilterClient<C, M> + where C: BlockChainClient, + M: MinerService { + client: Weak<C>, + miner: Weak<M>, polls: Mutex<PollManager<PollFilter>>, } -impl<C> EthFilterClient<C> where C: BlockChainClient { +impl<C, M> EthFilterClient<C, M> + where C: BlockChainClient, + M: MinerService { + /// Creates new Eth filter client. - pub fn new(client: &Arc<C>) -> Self { + pub fn new(client: &Arc<C>, miner: &Arc<M>) -> Self { EthFilterClient { client: Arc::downgrade(client), - polls: Mutex::new(PollManager::new()) + miner: Arc::downgrade(miner), + polls: Mutex::new(PollManager::new()), } } } -impl<C> EthFilter for EthFilterClient<C> where C: BlockChainClient + 'static { +impl<C, M> EthFilter for EthFilterClient<C, M> + where C: BlockChainClient + 'static, + M: MinerService + 'static { + fn new_filter(&self, params: Params) -> Result<Value, Error> { from_params::<(Filter,)>(params) .and_then(|(filter,)| { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::Logs(filter.into()), take_weak!(self.client).chain_info().best_block_number); + let block_number = take_weak!(self.client).chain_info().best_block_number; + let id = polls.create_poll(PollFilter::Logs(block_number, filter.into())); to_value(&U256::from(id)) }) } @@ -317,7 +394,7 @@ impl<C> EthFilter for EthFilterClient<C> where C: BlockChainClient + 'static { match params { Params::None => { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::Block, take_weak!(self.client).chain_info().best_block_number); + let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number)); to_value(&U256::from(id)) }, _ => Err(Error::invalid_params()) @@ -328,7 +405,9 @@ impl<C> EthFilter for EthFilterClient<C> where C: BlockChainClient + 'static { match params { Params::None => { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::PendingTransaction, take_weak!(self.client).chain_info().best_block_number); + let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(); + let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); + to_value(&U256::from(id)) }, _ => Err(Error::invalid_params()) @@ -339,37 +418,47 @@ let client = take_weak!(self.client); from_params::<(Index,)>(params) .and_then(|(index,)| { - let info = self.polls.lock().unwrap().poll_info(&index.value()).cloned(); - match info { + let mut polls = self.polls.lock().unwrap(); + match polls.poll_mut(&index.value()) { None => 
Ok(Value::Array(vec![] as Vec<Value>)), - Some(info) => match info.filter { - PollFilter::Block => { + Some(filter) => match *filter { + PollFilter::Block(ref mut block_number) => { // + 1, cause we want to return hashes including current block hash. let current_number = client.chain_info().best_block_number + 1; - let hashes = (info.block_number..current_number).into_iter() + let hashes = (*block_number..current_number).into_iter() .map(BlockId::Number) .filter_map(|id| client.block_hash(id)) .collect::<Vec<H256>>(); - self.polls.lock().unwrap().update_poll(&index.value(), current_number); + *block_number = current_number; to_value(&hashes) }, - PollFilter::PendingTransaction => { - // TODO: fix implementation once TransactionQueue is merged - to_value(&vec![] as &Vec<H256>) + PollFilter::PendingTransaction(ref mut previous_hashes) => { + let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); + // calculate diff: only hashes that were not seen at the previous poll + let previous_hashes_set = previous_hashes.into_iter().map(|h| h.clone()).collect::<HashSet<H256>>(); + let diff = current_hashes + .iter() + .filter(|hash| !previous_hashes_set.contains(*hash)) + .cloned() + .collect::<Vec<H256>>(); + + *previous_hashes = current_hashes; + + to_value(&diff) }, - PollFilter::Logs(mut filter) => { - filter.from_block = BlockId::Number(info.block_number); + PollFilter::Logs(ref mut block_number, ref mut filter) => { + filter.from_block = BlockId::Number(*block_number); filter.to_block = BlockId::Latest; - let logs = client.logs(filter) + let logs = client.logs(filter.clone()) .into_iter() .map(From::from) .collect::<Vec<Log>>(); let current_number = client.chain_info().best_block_number; - self.polls.lock().unwrap().update_poll(&index.value(), current_number); + *block_number = current_number; to_value(&logs) } } diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index ce200244c..2822059d6 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -39,12 +39,7 @@ impl<A> Personal for PersonalClient<A> where A: AccountProvider + 'static { fn accounts(&self, _: Params) -> Result<Value, Error> { let store = take_weak!(self.accounts); match store.accounts() { - Ok(account_list) => { - Ok(Value::Array(account_list.iter() - .map(|&account| Value::String(format!("{:?}", account))) - .collect::<Vec<Value>>()) - ) - } + Ok(account_list) => to_value(&account_list), Err(_) => Err(Error::internal_error()) } } @@ -54,7 +49,7 @@ impl<A> Personal for PersonalClient<A> where A: AccountProvider + 'static { |(pass, )| { let store = take_weak!(self.accounts); match store.new_account(&pass) { - Ok(address) => Ok(Value::String(format!("{:?}", address))), + Ok(address) => Ok(Value::String(format!("0x{:?}", address))), Err(_) => Err(Error::internal_error()) } } diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs new file mode 100644 index 000000000..35c227e40 --- /dev/null +++ b/rpc/src/v1/tests/eth.rs @@ -0,0 +1,279 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. 
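The PendingTransaction arm above reports only the transactions that entered the queue since the last poll, i.e. a set difference against the previously seen hashes. A self-contained sketch of that computation (plain helper function; the name is hypothetical and not part of the diff):

    use std::collections::HashSet;

    // Returns the hashes present in `current` but not in `previous`.
    fn new_hashes(previous: &[H256], current: &[H256]) -> Vec<H256> {
        let seen: HashSet<&H256> = previous.iter().collect();
        current.iter().filter(|h| !seen.contains(h)).cloned().collect()
    }
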
+ +use std::collections::HashMap; +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use util::hash::{Address, H256}; +use util::numbers::U256; +use ethcore::client::{TestBlockChainClient, EachBlockWith}; +use v1::{Eth, EthClient}; +use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config, TestMinerService}; + +fn blockchain_client() -> Arc<TestBlockChainClient> { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Nothing); + client.set_balance(Address::from(1), U256::from(5)); + client.set_storage(Address::from(1), H256::from(4), H256::from(7)); + client.set_code(Address::from(1), vec![0xff, 0x21]); + Arc::new(client) +} + +fn accounts_provider() -> Arc<TestAccountProvider> { + let mut accounts = HashMap::new(); + accounts.insert(Address::from(1), TestAccount::new("test")); + let ap = TestAccountProvider::new(accounts); + Arc::new(ap) +} + +fn sync_provider() -> Arc<TestSyncProvider> { + Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })) +} + +fn miner_service() -> Arc<TestMinerService> { + Arc::new(TestMinerService) +} + +struct EthTester { + _client: Arc<TestBlockChainClient>, + _sync: Arc<TestSyncProvider>, + _accounts_provider: Arc<TestAccountProvider>, + _miner: Arc<TestMinerService>, + pub io: IoHandler, +} + +impl Default for EthTester { + fn default() -> Self { + let client = blockchain_client(); + let sync = sync_provider(); + let ap = accounts_provider(); + let miner = miner_service(); + let eth = EthClient::new(&client, &sync, &ap, &miner).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(eth); + EthTester { + _client: client, + _sync: sync, + _accounts_provider: ap, + _miner: miner, + io: io + } + } +} + +#[test] +fn rpc_eth_protocol_version() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_protocolVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +#[ignore] +fn rpc_eth_syncing() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_hashrate() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_author() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_mining() { + unimplemented!() +} + +#[test] +fn rpc_eth_gas_price() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_gasPrice", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0ba43b7400","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_accounts() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_block_number() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0a","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_balance() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBalance", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x05","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_storage_at() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getStorageAt", + "params": 
["0x0000000000000000000000000000000000000001", "0x4", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x07","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_transaction_count() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getTransactionCount", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_block_transaction_count_by_hash() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBlockTransactionCountByHash", + "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_transaction_count_by_number() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBlockTransactionCountByNumber", + "params": ["latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_uncle_count_by_block_hash() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getUncleCountByBlockHash", + "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_uncle_count_by_block_number() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getUncleCountByBlockNumber", + "params": ["latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_code() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getCode", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0xff21","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +#[ignore] +fn rpc_eth_call() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_send_transaction() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_send_raw_transaction() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_sign() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_estimate_gas() { + unimplemented!() +} + +#[test] +fn rpc_eth_compilers() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_getCompilers", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + + + diff --git a/rpc/src/v1/tests/helpers/account_provider.rs b/rpc/src/v1/tests/helpers/account_provider.rs new file mode 100644 index 000000000..ce5b76b44 --- /dev/null +++ b/rpc/src/v1/tests/helpers/account_provider.rs @@ -0,0 +1,90 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +use std::sync::RwLock; +use std::collections::HashMap; +use std::io; +use util::hash::{Address, H256}; +use util::crypto::{Secret, Signature}; +use util::keys::store::{AccountProvider, SigningError, EncryptedHashMapError}; + +/// Account mock. +#[derive(Clone)] +pub struct TestAccount { + /// True if account is unlocked. + pub unlocked: bool, + /// Account's password. + pub password: String, +} + +impl TestAccount { + pub fn new(password: &str) -> Self { + TestAccount { + unlocked: false, + password: password.to_owned(), + } + } +} + +/// Test account provider. +pub struct TestAccountProvider { + accounts: RwLock<HashMap<Address, TestAccount>>, + pub adds: RwLock<Vec<String>>, +} + +impl TestAccountProvider { + /// Basic constructor. + pub fn new(accounts: HashMap<Address, TestAccount>) -> Self { + TestAccountProvider { + accounts: RwLock::new(accounts), + adds: RwLock::new(vec![]), + } + } +} + +impl AccountProvider for TestAccountProvider { + fn accounts(&self) -> Result<Vec<Address>, io::Error> { + Ok(self.accounts.read().unwrap().keys().cloned().collect()) + } + + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { + match self.accounts.write().unwrap().get_mut(account) { + Some(ref mut acc) if acc.password == pass => { + acc.unlocked = true; + Ok(()) + }, + Some(_) => Err(EncryptedHashMapError::InvalidPassword), + None => Err(EncryptedHashMapError::UnknownIdentifier), + } + } + + fn new_account(&self, pass: &str) -> Result<Address, io::Error> { + let mut adds = self.adds.write().unwrap(); + let address = Address::from(adds.len() as u64 + 2); + adds.push(pass.to_owned()); + Ok(address) + } + + fn account_secret(&self, _account: &Address) -> Result<Secret, SigningError> { + unimplemented!() + } + + fn sign(&self, _account: &Address, _message: &H256) -> Result<Signature, SigningError> { + unimplemented!() + } + +} + diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs new file mode 100644 index 000000000..0cddf2a1e --- /dev/null +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -0,0 +1,53 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. 
+ +use util::{Address, H256, U256, Bytes}; +use util::standard::*; +use ethcore::error::Error; +use ethcore::client::BlockChainClient; +use ethcore::block::ClosedBlock; +use ethcore::transaction::SignedTransaction; +use ethminer::{MinerService, MinerStatus}; + +pub struct TestMinerService; + +impl MinerService for TestMinerService { + + /// Returns miner's status. + fn status(&self) -> MinerStatus { unimplemented!(); } + + /// Imports transactions to transaction queue. + fn import_transactions<T>(&self, _transactions: Vec<SignedTransaction>, _fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256 { unimplemented!(); } + + /// Returns hashes of transactions currently in the pending queue. + fn pending_transactions_hashes(&self) -> Vec<H256> { unimplemented!(); } + + /// Removes all transactions from the queue and restarts the mining operation. + fn clear_and_reset(&self, _chain: &BlockChainClient) { unimplemented!(); } + + /// Called when blocks are imported to chain, updates transactions queue. + fn chain_new_blocks(&self, _chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) { unimplemented!(); } + + /// New chain head event. Restarts the mining operation. + fn prepare_sealing(&self, _chain: &BlockChainClient) { unimplemented!(); } + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self, _chain: &BlockChainClient) -> &Mutex<Option<ClosedBlock>> { unimplemented!(); } + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> { unimplemented!(); } +} \ No newline at end of file diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs index 501bfb2d3..fc429982e 100644 --- a/rpc/src/v1/tests/helpers/mod.rs +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. +mod account_provider; mod sync_provider; +mod miner_service; +pub use self::account_provider::{TestAccount, TestAccountProvider}; pub use self::sync_provider::{Config, TestSyncProvider}; +pub use self::miner_service::{TestMinerService}; diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index a3711d949..631752dfc 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. 
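TestMinerService above panics on every call, which is enough for request-routing tests but not for filter tests that actually poll pending transactions. A hypothetical variant of one stub (not in the diff) that would let such tests run against an empty queue:

    /// Hypothetical stub: report an empty pending queue instead of panicking.
    fn pending_transactions_hashes(&self) -> Vec<H256> {
        vec![]
    }
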
-use ethcore::transaction::SignedTransaction; use ethsync::{SyncProvider, SyncStatus, SyncState}; pub struct Config { @@ -40,7 +39,6 @@ impl TestSyncProvider { num_peers: config.num_peers, num_active_peers: 0, mem_used: 0, - transaction_queue_pending: 0, }, } } @@ -50,9 +48,5 @@ impl SyncProvider for TestSyncProvider { fn status(&self) -> SyncStatus { self.status.clone() } - - fn insert_transaction(&self, _transaction: SignedTransaction) { - unimplemented!() - } } diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index 3a38ced15..21085a0fd 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -16,6 +16,8 @@ //!TODO: load custom blockchain state and test +mod eth; mod net; mod web3; mod helpers; +mod personal; diff --git a/rpc/src/v1/tests/personal.rs b/rpc/src/v1/tests/personal.rs new file mode 100644 index 000000000..261527c47 --- /dev/null +++ b/rpc/src/v1/tests/personal.rs @@ -0,0 +1,59 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use v1::tests::helpers::{TestAccount, TestAccountProvider}; +use v1::{PersonalClient, Personal}; +use util::numbers::*; +use std::collections::*; + +fn accounts_provider() -> Arc<TestAccountProvider> { + let mut accounts = HashMap::new(); + accounts.insert(Address::from(1), TestAccount::new("test")); + let ap = TestAccountProvider::new(accounts); + Arc::new(ap) +} + +fn setup() -> (Arc<TestAccountProvider>, IoHandler) { + let test_provider = accounts_provider(); + let personal = PersonalClient::new(&test_provider); + let io = IoHandler::new(); + io.add_delegate(personal.to_delegate()); + (test_provider, io) +} + +#[test] +fn accounts() { + let (_test_provider, io) = setup(); + + let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + + +#[test] +fn new_account() { + let (_test_provider, io) = setup(); + + let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000002","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 8c24dd38c..bcd7e7cfe 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -59,21 +59,30 @@ pub trait Eth: Sized + Send + Sync + 'static { /// Returns the number of transactions sent from given address at given time (block number). fn transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } - /// Returns the number of transactions in a block given block hash. + /// Returns the number of transactions in a block with given hash. 
fn block_transaction_count_by_hash(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } - /// Returns the number of transactions in a block given block number. + /// Returns the number of transactions in a block with given block number. fn block_transaction_count_by_number(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } - /// Returns the number of uncles in a given block. - fn block_uncles_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + /// Returns the number of uncles in a block with given hash. + fn block_uncles_count_by_hash(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + + /// Returns the number of uncles in a block with given block number. + fn block_uncles_count_by_number(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } /// Returns the code at given address at given time (block number). fn code_at(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + /// Signs the data with the key of the given address. + fn sign(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + /// Sends transaction. fn send_transaction(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + /// Sends signed transaction. + fn send_raw_transaction(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + /// Call contract. fn call(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } @@ -130,15 +139,17 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_gasPrice", Eth::gas_price); delegate.add_method("eth_accounts", Eth::accounts); delegate.add_method("eth_blockNumber", Eth::block_number); - delegate.add_method("eth_balance", Eth::balance); + delegate.add_method("eth_getBalance", Eth::balance); delegate.add_method("eth_getStorageAt", Eth::storage_at); delegate.add_method("eth_getTransactionCount", Eth::transaction_count); delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); - delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count); - delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count); - delegate.add_method("eth_code", Eth::code_at); + delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash); + delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number); + delegate.add_method("eth_getCode", Eth::code_at); + delegate.add_method("eth_sign", Eth::sign); delegate.add_method("eth_sendTransaction", Eth::send_transaction); + delegate.add_method("eth_sendRawTransaction", Eth::send_raw_transaction); delegate.add_method("eth_call", Eth::call); delegate.add_method("eth_estimateGas", Eth::estimate_gas); delegate.add_method("eth_getBlockByHash", Eth::block_by_hash); diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 0518a58ea..232cf0bf3 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -17,7 +17,6 @@ use util::numbers::*; use ethcore::transaction::{LocalizedTransaction, Action}; use v1::types::{Bytes, OptionalValue}; -use serde::Error; #[derive(Debug, Default, Serialize)] pub struct Transaction { diff --git a/sync/Cargo.toml b/sync/Cargo.toml index cf0027368..8cd59333d 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -10,15 +10,14 @@ authors = ["Ethcore , + /// Miner + miner: Arc<Miner>, } type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>; impl ChainSync { /// Create a new instance of syncing strategy. 
- pub fn new(config: SyncConfig) -> ChainSync { + pub fn new(config: SyncConfig, miner: Arc<Miner>) -> ChainSync { ChainSync { state: SyncState::NotSynced, starting_block: 0, @@ -244,7 +240,7 @@ impl ChainSync { last_sent_block_number: 0, max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, - transaction_queue: Mutex::new(TransactionQueue::new()), + miner: miner, } } @@ -260,7 +256,6 @@ impl ChainSync { blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 }, num_peers: self.peers.len(), num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(), - transaction_queue_pending: self.transaction_queue.lock().unwrap().status().pending, mem_used: // TODO: https://github.com/servo/heapsize/pull/50 // self.downloading_hashes.heap_size_of_children() @@ -299,12 +294,9 @@ impl ChainSync { /// Restart sync pub fn restart(&mut self, io: &mut SyncIo) { self.reset(); - self.last_imported_block = None; - self.last_imported_hash = None; self.starting_block = 0; self.highest_block = None; self.have_common_block = false; - self.transaction_queue.lock().unwrap().clear(); self.starting_block = io.chain().chain_info().best_block_number; self.state = SyncState::NotSynced; } @@ -367,7 +359,7 @@ impl ChainSync { for i in 0..item_count { let info: BlockHeader = try!(r.val_at(i)); let number = BlockNumber::from(info.number); - if number <= self.current_base_block() || self.headers.have_item(&number) { + if (number <= self.current_base_block() && self.have_common_block) || self.headers.have_item(&number) { trace!(target: "sync", "Skipping existing block header"); continue; } @@ -377,11 +369,17 @@ } let hash = info.hash(); match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain => { - self.have_common_block = true; - self.last_imported_block = Some(number); - self.last_imported_hash = Some(hash.clone()); - trace!(target: "sync", "Found common header {} ({})", number, hash); + BlockStatus::InChain | BlockStatus::Queued => { + if !self.have_common_block || self.current_base_block() < number { + self.last_imported_block = Some(number); + self.last_imported_hash = Some(hash.clone()); + } + if !self.have_common_block { + self.have_common_block = true; + trace!(target: "sync", "Found common header {} ({})", number, hash); + } else { + trace!(target: "sync", "Header already in chain {} ({})", number, hash); + } }, _ => { if self.have_common_block { @@ -589,7 +587,7 @@ impl ChainSync { pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}", peer); if let Err(e) = self.send_status(io) { - warn!(target:"sync", "Error sending status request: {:?}", e); + debug!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer); } } @@ -657,10 +655,7 @@ impl ChainSync { let mut needed_numbers: Vec<BlockNumber> = Vec::new(); if self.have_common_block && !self.headers.is_empty() && self.headers.range_iter().next().unwrap().0 == self.current_base_block() + 1 { - for (start, ref items) in self.headers.range_iter() { - if needed_bodies.len() >= MAX_BODIES_TO_REQUEST { - break; - } + if let Some((start, ref items)) = self.headers.range_iter().next() { let mut index: BlockNumber = 0; while index != items.len() as BlockNumber && needed_bodies.len() < MAX_BODIES_TO_REQUEST { let block = start + index; @@ -704,7 +699,10 @@ impl ChainSync { if !self.have_common_block { // download backwards until 
common block is found 1 header at a time let chain_info = io.chain().chain_info(); - start = chain_info.best_block_number; + start = match self.last_imported_block { + Some(n) => n, + None => chain_info.best_block_number, + }; if !self.headers.is_empty() { start = min(start, self.headers.range_iter().next().unwrap().0 - 1); } @@ -845,18 +843,12 @@ impl ChainSync { /// Remove downloaded bocks/headers starting from specified number. /// Used to recover from an error and re-download parts of the chain detected as bad. fn remove_downloaded_blocks(&mut self, start: BlockNumber) { - for n in self.headers.get_tail(&start) { - if let Some(ref header_data) = self.headers.find_item(&n) { - let header_to_delete = HeaderView::new(&header_data.data); - let header_id = HeaderId { - transactions_root: header_to_delete.transactions_root(), - uncles: header_to_delete.uncles_hash() - }; - self.header_ids.remove(&header_id); - } - self.downloading_bodies.remove(&n); - self.downloading_headers.remove(&n); - } + let ids = self.header_ids.drain().filter(|&(_, v)| v < start).collect(); + self.header_ids = ids; + let hdrs = self.downloading_headers.drain().filter(|v| *v < start).collect(); + self.downloading_headers = hdrs; + let bodies = self.downloading_bodies.drain().filter(|v| *v < start).collect(); + self.downloading_bodies = bodies; self.headers.remove_from(&start); self.bodies.remove_from(&start); } @@ -934,16 +926,17 @@ impl ChainSync { } /// Called when peer sends us new transactions fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { - let chain = io.chain(); let item_count = r.item_count(); trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); - let fetch_latest_nonce = |a : &Address| chain.nonce(a); - let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let mut transactions = Vec::with_capacity(item_count); for i in 0..item_count { let tx: SignedTransaction = try!(r.val_at(i)); - let _ = transaction_queue.add(tx, &fetch_latest_nonce); + transactions.push(tx); } + let chain = io.chain(); + let fetch_nonce = |a: &Address| chain.nonce(a); + let _ = self.miner.import_transactions(transactions, fetch_nonce); Ok(()) } @@ -1099,7 +1092,7 @@ impl ChainSync { let rlp = UntrustedRlp::new(data); if packet_id != STATUS_PACKET && !self.peers.contains_key(&peer) { - warn!(target:"sync", "Unexpected packet from unregistered peer: {}:{}", peer, io.peer_info(peer)); + debug!(target:"sync", "Unexpected packet from unregistered peer: {}:{}", peer, io.peer_info(peer)); return; } let result = match packet_id { @@ -1274,48 +1267,16 @@ impl ChainSync { } /// called when block is imported to chain, updates transactions queue and propagates the blocks - pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) { - fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { - let block = chain - .block(BlockId::Hash(hash.clone())) - // Client should send message after commit to db and inserting to chain. 
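// A sketch of the reworked on_peer_transactions from the hunk above,
// reassembled for readability: transactions are decoded into a batch first,
// so a decode error aborts before anything reaches the miner, and the whole
// batch is then handed to the shared Miner, which now owns the transaction
// queue that used to live inside ChainSync. Types follow this diff.
//
//	fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
//		let item_count = r.item_count();
//		trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count);
//		let mut transactions = Vec::with_capacity(item_count);
//		for i in 0..item_count {
//			let tx: SignedTransaction = try!(r.val_at(i));
//			transactions.push(tx);
//		}
//		// The chain is borrowed only after decoding succeeds.
//		let chain = io.chain();
//		let fetch_nonce = |a: &Address| chain.nonce(a);
//		let _ = self.miner.import_transactions(transactions, fetch_nonce);
//		Ok(())
//	}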
- .expect("Expected in-chain blocks."); - let block = BlockView::new(&block); - block.transactions() - } - - - { - let chain = io.chain(); - let good = good.par_iter().map(|h| fetch_transactions(chain, h)); - let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); - - good.for_each(|txs| { - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); - transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); - }); - bad.for_each(|txs| { - // populate sender - for tx in &txs { - let _sender = tx.sender(); - } - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - let _ = transaction_queue.add_all(txs, |a| chain.nonce(a)); - }); - } - + pub fn chain_new_blocks(&mut self, io: &mut SyncIo, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) { + // Notify miner + self.miner.chain_new_blocks(io.chain(), imported, invalid, enacted, retracted); // Propagate latests blocks self.propagate_latest_blocks(io); // TODO [todr] propagate transactions? } - /// Add transaction to the transaction queue - pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) -> Result<(), Error> - where T: Fn(&Address) -> U256 - { - let mut queue = self.transaction_queue.lock().unwrap(); - queue.add(transaction, fetch_nonce) + pub fn chain_new_head(&mut self, io: &mut SyncIo) { + self.miner.prepare_sealing(io.chain()); } } @@ -1328,6 +1289,7 @@ mod tests { use super::{PeerInfo, PeerAsking}; use ethcore::header::*; use ethcore::client::*; + use ethminer::{Miner, MinerService}; fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { let mut header = Header::new(); @@ -1437,7 +1399,7 @@ mod tests { } fn dummy_sync_with_peer(peer_latest_hash: H256) -> ChainSync { - let mut sync = ChainSync::new(SyncConfig::default()); + let mut sync = ChainSync::new(SyncConfig::default(), Miner::new()); sync.peers.insert(0, PeerInfo { protocol_version: 0, @@ -1658,15 +1620,15 @@ mod tests { let mut io = TestIo::new(&mut client, &mut queue, None); // when - sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]); - assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); - assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); - sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]); + sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks); + assert_eq!(sync.miner.status().transaction_queue_future, 0); + assert_eq!(sync.miner.status().transaction_queue_pending, 1); + sync.chain_new_blocks(&mut io, &good_blocks, &[], &[], &retracted_blocks); // then - let status = sync.transaction_queue.lock().unwrap().status(); - assert_eq!(status.pending, 1); - assert_eq!(status.future, 0); + let status = sync.miner.status(); + assert_eq!(status.transaction_queue_pending, 1); + assert_eq!(status.transaction_queue_future, 0); } #[test] diff --git a/sync/src/lib.rs b/sync/src/lib.rs index ef0df0dcf..1c87da2de 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -32,18 +32,21 @@ //! extern crate ethcore_util as util; //! extern crate ethcore; //! extern crate ethsync; +//! extern crate ethminer; //! use std::env; //! use std::sync::Arc; //! use util::network::{NetworkService, NetworkConfiguration}; //! use ethcore::client::{Client, ClientConfig}; //! use ethsync::{EthSync, SyncConfig}; +//! use ethminer::Miner; //! use ethcore::ethereum; //! //! fn main() { //! let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap(); //! 
let dir = env::temp_dir(); //! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); -//! EthSync::register(&mut service, SyncConfig::default(), client); +//! let miner = Miner::new(); +//! EthSync::register(&mut service, SyncConfig::default(), client, miner); //! } //! ``` @@ -52,28 +55,27 @@ extern crate log; #[macro_use] extern crate ethcore_util as util; extern crate ethcore; +extern crate ethminer; extern crate env_logger; extern crate time; extern crate rand; -extern crate rayon; #[macro_use] extern crate heapsize; use std::ops::*; use std::sync::*; -use ethcore::client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use util::TimerToken; use util::{U256, ONE_U256}; -use chain::ChainSync; +use ethcore::client::Client; use ethcore::service::SyncMessage; +use ethminer::Miner; use io::NetSyncIo; +use chain::ChainSync; mod chain; mod io; mod range_collection; -mod transaction_queue; -pub use transaction_queue::TransactionQueue; #[cfg(test)] mod tests; @@ -99,8 +101,6 @@ impl Default for SyncConfig { pub trait SyncProvider: Send + Sync { /// Get sync status fn status(&self) -> SyncStatus; - /// Insert transaction in the sync transaction queue - fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction); } /// Ethereum network protocol handler @@ -115,10 +115,10 @@ pub use self::chain::{SyncStatus, SyncState}; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, config: SyncConfig, chain: Arc) -> Arc { + pub fn register(service: &mut NetworkService, config: SyncConfig, chain: Arc, miner: Arc) -> Arc { let sync = Arc::new(EthSync { chain: chain, - sync: RwLock::new(ChainSync::new(config)), + sync: RwLock::new(ChainSync::new(config, miner)), }); service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); sync @@ -140,16 +140,6 @@ impl SyncProvider for EthSync { fn status(&self) -> SyncStatus { self.sync.read().unwrap().status() } - - /// Insert transaction in transaction queue - fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction) { - use util::numbers::*; - - let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); - let sync = self.sync.write().unwrap(); - sync.insert_transaction(transaction, &nonce_fn).unwrap_or_else( - |e| warn!(target: "sync", "Error inserting transaction to queue: {:?}", e)); - } } impl NetworkProtocolHandler for EthSync { @@ -174,13 +164,16 @@ impl NetworkProtocolHandler for EthSync { self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } - #[allow(single_match)] fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { - SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { + SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted } => { let mut sync_io = NetSyncIo::new(io, self.chain.deref()); - self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted); + self.sync.write().unwrap().chain_new_blocks(&mut sync_io, imported, invalid, enacted, retracted); }, + SyncMessage::NewChainHead => { + let mut sync_io = NetSyncIo::new(io, self.chain.deref()); + self.sync.write().unwrap().chain_new_head(&mut sync_io); + } _ => {/* Ignore other messages */}, } } diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index 664d7c7a3..0628df401 100644 
--- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -300,12 +300,17 @@ fn test_range() { let mut r = ranges.clone(); r.remove_from(&20); assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal); - r.remove_from(&17); - assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal); - r.remove_from(&15); + r.remove_from(&18); + assert!(!r.have_item(&18)); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q'][..])]), Ordering::Equal); + r.remove_from(&16); + assert!(!r.have_item(&16)); assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal); r.remove_from(&3); assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); + r.remove_from(&1); + assert_eq!(r.range_iter().next(), None); + let mut r = ranges.clone(); r.remove_from(&2); assert_eq!(r.range_iter().next(), None); } diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index ca4ae5158..b3e62ccc6 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -18,6 +18,7 @@ use util::*; use ethcore::client::{TestBlockChainClient, BlockChainClient}; use io::SyncIo; use chain::ChainSync; +use ethminer::Miner; use ::SyncConfig; pub struct TestIo<'p> { @@ -92,7 +93,7 @@ impl TestNet { for _ in 0..n { net.peers.push(TestPeer { chain: TestBlockChainClient::new(), - sync: ChainSync::new(SyncConfig::default()), + sync: ChainSync::new(SyncConfig::default(), Miner::new()), queue: VecDeque::new(), }); } @@ -167,6 +168,6 @@ impl TestNet { pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { let mut peer = self.peer_mut(peer_id); - peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]); + peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[]); } } diff --git a/test.sh b/test.sh index 0f5edb0d1..0bf08e67f 100755 --- a/test.sh +++ b/test.sh @@ -1,4 +1,4 @@ #!/bin/sh # Running Parity Full Test Sute -cargo test --features ethcore/json-tests $1 -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity +cargo test --features ethcore/json-tests $1 -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer diff --git a/util/Cargo.toml b/util/Cargo.toml index ce40bb052..2630fe4bb 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -27,7 +27,7 @@ crossbeam = "0.2" slab = "0.1" sha3 = { path = "sha3" } serde = "0.7.0" -clippy = { version = "0.0.49", optional = true } +clippy = { version = "0.0.50", optional = true } json-tests = { path = "json-tests" } rustc_version = "0.1.0" igd = "0.4.2" diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index c18ed839c..ea617d570 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -36,7 +36,6 @@ //! The functions here are designed to be fast. //! - #[cfg(all(asm_available, target_arch="x86_64"))] use std::mem; use std::fmt; diff --git a/util/src/error.rs b/util/src/error.rs index 68aa3e648..409cc0e5d 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -21,12 +21,13 @@ use network::NetworkError; use rlp::DecoderError; use io; use std::fmt; +use hash::H256; #[derive(Debug)] /// Error in database subsystem. pub enum BaseDataError { /// An entry was removed more times than inserted. 
- NegativelyReferencedHash, + NegativelyReferencedHash(H256), } #[derive(Debug)] diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 2e4e966c1..83a80b7c2 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -35,6 +35,7 @@ use std::env; pub struct ArchiveDB { overlay: MemoryDB, backing: Arc, + latest_era: Option, } // all keys must be at least 12 bytes @@ -60,9 +61,11 @@ impl ArchiveDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } + let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); ArchiveDB { overlay: MemoryDB::new(), backing: Arc::new(backing), + latest_era: latest_era, } } @@ -129,6 +132,7 @@ impl JournalDB for ArchiveDB { Box::new(ArchiveDB { overlay: MemoryDB::new(), backing: self.backing.clone(), + latest_era: self.latest_era, }) } @@ -137,10 +141,10 @@ impl JournalDB for ArchiveDB { } fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + self.latest_era.is_none() } - fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result { + fn commit(&mut self, now: u64, _: &H256, _: Option<(u64, H256)>) -> Result { let batch = DBTransaction::new(); let mut inserts = 0usize; let mut deletes = 0usize; @@ -156,6 +160,10 @@ impl JournalDB for ArchiveDB { deletes += 1; } } + if self.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = Some(now); + } try!(self.backing.write(batch)); Ok((inserts + deletes) as u32) } diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 83be31fb4..7cb00b993 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -64,6 +64,7 @@ pub struct EarlyMergeDB { overlay: MemoryDB, backing: Arc, refs: Option>>>, + latest_era: Option, } // all keys must be at least 12 bytes @@ -90,11 +91,13 @@ impl EarlyMergeDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let refs = Some(Arc::new(RwLock::new(EarlyMergeDB::read_refs(&backing)))); + let (latest_era, refs) = EarlyMergeDB::read_refs(&backing); + let refs = Some(Arc::new(RwLock::new(refs))); EarlyMergeDB { overlay: MemoryDB::new(), backing: Arc::new(backing), refs: refs, + latest_era: latest_era, } } @@ -168,7 +171,7 @@ impl EarlyMergeDB { trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); } - fn kill_keys(deletes: &Vec, refs: &mut HashMap, batch: &DBTransaction, from: RemoveFrom, trace: bool) { + fn kill_keys(deletes: &[H256], refs: &mut HashMap, batch: &DBTransaction, from: RemoveFrom, trace: bool) { // with a kill on {queue_refs: 1, in_archive: true}, we have two options: // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) // - convert to {queue_refs: 0, in_archive: true} (i.e. 
remove it from the conceptual queue) @@ -225,9 +228,9 @@ impl EarlyMergeDB { #[cfg(test)] fn can_reconstruct_refs(&self) -> bool { - let reconstructed = Self::read_refs(&self.backing); + let (latest_era, reconstructed) = Self::read_refs(&self.backing); let refs = self.refs.as_ref().unwrap().write().unwrap(); - if *refs != reconstructed { + if *refs != reconstructed || latest_era != self.latest_era { let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); @@ -241,10 +244,12 @@ impl EarlyMergeDB { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_refs(db: &Database) -> HashMap { + fn read_refs(db: &Database) -> (Option, HashMap) { let mut refs = HashMap::new(); + let mut latest_era = None; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::(&val); + latest_era = Some(era); loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -265,7 +270,7 @@ impl EarlyMergeDB { era -= 1; } } - refs + (latest_era, refs) } } @@ -320,6 +325,7 @@ impl JournalDB for EarlyMergeDB { overlay: MemoryDB::new(), backing: self.backing.clone(), refs: self.refs.clone(), + latest_era: self.latest_era.clone(), }) } @@ -334,8 +340,10 @@ impl JournalDB for EarlyMergeDB { } } + + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: + // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] @@ -435,7 +443,10 @@ impl JournalDB for EarlyMergeDB { trace!(target: "jdb.ops", " Deletes: {:?}", removes); } try!(batch.put(&last, r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + if self.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = Some(now); + } } // apply old commits' details @@ -464,7 +475,7 @@ impl JournalDB for EarlyMergeDB { if trace { trace!(target: "jdb.ops", " Finalising: {:?}", inserts); } - for k in inserts.iter() { + for k in &inserts { match refs.get(k).cloned() { None => { // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert @@ -480,7 +491,7 @@ impl JournalDB for EarlyMergeDB { Self::set_already_in(&batch, k); refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true }); } - Some( RefInfo{queue_refs: _, in_archive: true} ) => { + Some( RefInfo{in_archive: true, ..} ) => { // Invalid! Reinserted the same key twice. 
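// The recurring `latest_era.map_or(true, |e| now > e)` guard above is the
// heart of this patch: LATEST_ERA_KEY is only rewritten when the committed
// era is strictly newer. A minimal self-contained sketch of the idiom; the
// struct and method names here are illustrative, not part of the diff.

struct EraTracker {
	latest_era: Option<u64>,
}

impl EraTracker {
	/// Record a commit at era `now` and return whether the persisted
	/// LATEST_ERA_KEY must be rewritten. `None` means an empty database,
	/// so the first commit always writes; re-committing an older era
	/// (as the new insert_older_era tests do) leaves it untouched.
	fn needs_era_update(&mut self, now: u64) -> bool {
		if self.latest_era.map_or(true, |e| now > e) {
			self.latest_era = Some(now);
			true
		} else {
			false
		}
	}
}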
warn!("Key {} inserted twice into same fork.", k); } @@ -552,6 +563,26 @@ mod tests { assert!(jdb.exists(&x)); } + #[test] + fn insert_older_era() { + let mut jdb = EarlyMergeDB::new_temp(); + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(0, &b"0b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + } + #[test] fn long_history() { // history is 3 @@ -907,7 +938,7 @@ mod tests { assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } - + #[test] fn reopen_test() { let mut dir = ::std::env::temp_dir(); @@ -942,7 +973,7 @@ mod tests { jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } - + #[test] fn reopen_remove_three() { init_log(); @@ -996,7 +1027,7 @@ mod tests { assert!(!jdb.exists(&foo)); } } - + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index cf5278368..e73c12969 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -23,6 +23,7 @@ pub mod traits; mod archivedb; mod earlymergedb; mod overlayrecentdb; +mod refcounteddb; /// Export the JournalDB trait. pub use self::traits::JournalDB; @@ -75,6 +76,6 @@ pub fn new(path: &str, algorithm: Algorithm) -> Box { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)), Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)), - _ => unimplemented!(), + Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path)), } } diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 8dd4d1752..efbd26c3b 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -33,14 +33,14 @@ use super::JournalDB; /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. /// -/// There are two memory overlays: -/// - Transaction overlay contains current transaction data. It is merged with with history +/// There are two memory overlays: +/// - Transaction overlay contains current transaction data. It is merged with with history /// overlay on each `commit()` -/// - History overlay contains all data inserted during the history period. When the node +/// - History overlay contains all data inserted during the history period. When the node /// in the overlay becomes ancient it is written to disk on `commit()` /// -/// There is also a journal maintained in memory and on the disk as well which lists insertions -/// and removals for each commit during the history period. This is used to track +/// There is also a journal maintained in memory and on the disk as well which lists insertions +/// and removals for each commit during the history period. This is used to track /// data nodes that go out of history scope and must be written to disk. /// /// Commit workflow: @@ -50,12 +50,12 @@ use super::JournalDB; /// 3. Clear the transaction overlay. /// 4. For a canonical journal record that becomes ancient inserts its insertions into the disk DB /// 5. 
For each journal record that goes out of the history scope (becomes ancient) remove its -/// insertions from the history overlay, decreasing the reference counter and removing entry if +/// insertions from the history overlay, decreasing the reference counter and removing entry if /// if reaches zero. -/// 6. For a canonical journal record that becomes ancient delete its removals from the disk only if +/// 6. For a canonical journal record that becomes ancient delete its removals from the disk only if /// the removed key is not present in the history overlay. /// 7. Delete ancient record from memory and disk. -/// + pub struct OverlayRecentDB { transaction_overlay: MemoryDB, backing: Arc, @@ -66,7 +66,7 @@ pub struct OverlayRecentDB { struct JournalOverlay { backing_overlay: MemoryDB, journal: HashMap>, - latest_era: u64, + latest_era: Option, } #[derive(PartialEq)] @@ -152,10 +152,10 @@ impl OverlayRecentDB { let mut journal = HashMap::new(); let mut overlay = MemoryDB::new(); let mut count = 0; - let mut latest_era = 0; + let mut latest_era = None; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - latest_era = decode::(&val); - let mut era = latest_era; + let mut era = decode::(&val); + latest_era = Some(era); loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -223,7 +223,7 @@ impl JournalDB for OverlayRecentDB { let mut tx = self.transaction_overlay.drain(); let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); - // Increase counter for each inserted key no matter if the block is canonical or not. + // Increase counter for each inserted key no matter if the block is canonical or not. 
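// The `latest_era` field added to the journal overlay above is loaded once
// when the journal is rebuilt from disk, instead of being re-read on every
// query. A sketch of the load, assuming the diff's `Database`,
// `LATEST_ERA_KEY` and rlp `decode`; the generic argument to `decode`,
// stripped by extraction throughout this diff, is `u64`:
//
//	let latest_era = db.get(&LATEST_ERA_KEY)
//		.expect("Low-level database error.")
//		.map(|val| decode::<u64>(&val));
//
// An absent key now maps cleanly to None rather than a sentinel 0, which
// makes era 0 itself a legal, journalable era and lets is_empty() become a
// cheap `self.latest_era.is_none()` instead of a backing-store read.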
let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }); r.append(id); r.begin_list(inserted_keys.len()); @@ -236,14 +236,14 @@ impl JournalDB for OverlayRecentDB { r.append(&removed_keys); let mut k = RlpStream::new_list(3); - let index = journal_overlay.journal.get(&now).map(|j| j.len()).unwrap_or(0); + let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len()); k.append(&now); k.append(&index); k.append(&&PADDING[..]); try!(batch.put(&k.drain(), r.as_raw())); - if now >= journal_overlay.latest_era { + if journal_overlay.latest_era.map_or(true, |e| now > e) { try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); - journal_overlay.latest_era = now; + journal_overlay.latest_era = Some(now); } journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); } @@ -345,14 +345,14 @@ impl HashDB for OverlayRecentDB { self.lookup(key).is_some() } - fn insert(&mut self, value: &[u8]) -> H256 { + fn insert(&mut self, value: &[u8]) -> H256 { self.transaction_overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.transaction_overlay.emplace(key, value); + self.transaction_overlay.emplace(key, value); } - fn kill(&mut self, key: &H256) { - self.transaction_overlay.kill(key); + fn kill(&mut self, key: &H256) { + self.transaction_overlay.kill(key); } } @@ -749,7 +749,7 @@ mod tests { assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } - + #[test] fn reopen_test() { let mut dir = ::std::env::temp_dir(); @@ -784,7 +784,7 @@ mod tests { jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } - + #[test] fn reopen_remove_three() { init_log(); @@ -838,7 +838,7 @@ mod tests { assert!(!jdb.exists(&foo)); } } - + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); @@ -870,4 +870,24 @@ mod tests { assert!(!jdb.exists(&bar)); } } + + #[test] + fn insert_older_era() { + let mut jdb = OverlayRecentDB::new_temp(); + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(0, &b"0b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + } } diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs new file mode 100644 index 000000000..590964247 --- /dev/null +++ b/util/src/journaldb/refcounteddb.rs @@ -0,0 +1,285 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed, ref-counted JournalDB implementation. 
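// The replay rule at the heart of the new RefCountedDB below, restated as a
// sketch. A journal record is laid out as [ id, [insert_0, ...],
// [remove_0, ...] ]; the stripped generic in `let to_remove: Vec = ...`
// further down is `Vec<H256>`. The helper name is illustrative.
//
//	fn keys_to_drop(record: &Rlp, canon_id: &H256) -> Vec<H256> {
//		let our_id: H256 = record.val_at(0);
//		// When an era leaves the history window, the canonical record
//		// finally applies its queued *removals* (index 2); every
//		// non-canonical sibling rolls back its *insertions* (index 1).
//		record.val_at(if *canon_id == our_id { 2 } else { 1 })
//	}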
+ +use common::*; +use rlp::*; +use hashdb::*; +use overlaydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. +pub struct RefCountedDB { + forward: OverlayDB, + backing: Arc, + latest_era: Option, + inserts: Vec, + removes: Vec, +} + +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 512; +const PADDING : [u8; 10] = [ 0u8; 10 ]; + +impl RefCountedDB { + /// Create a new instance given a `backing` database. + pub fn new(path: &str) -> RefCountedDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + let backing = Arc::new(backing); + let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); + + RefCountedDB { + forward: OverlayDB::new_with_arc(backing.clone()), + backing: backing, + inserts: vec![], + removes: vec![], + latest_era: latest_era, + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + fn new_temp() -> RefCountedDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } +} + +impl HashDB for RefCountedDB { + fn keys(&self) -> HashMap { self.forward.keys() } + fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) } + fn exists(&self, key: &H256) -> bool { self.forward.exists(key) } + fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r } + fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } + fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); } +} + +impl JournalDB for RefCountedDB { + fn spawn(&self) -> Box { + Box::new(RefCountedDB { + forward: self.forward.clone(), + backing: self.backing.clone(), + latest_era: self.latest_era, + inserts: self.inserts.clone(), + removes: self.removes.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.inserts.heap_size_of_children() + self.removes.heap_size_of_children() + } + + fn is_empty(&self) -> bool { + self.latest_era.is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + // journal format: + // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, n] => [ ... ] + + // TODO: store last_era, reclaim_period. + + // when we make a new commit, we journal the inserts and removes. 
+ // for each end_era that we journaled that we are no passing by, + // we remove all of its removes assuming it is canonical and all + // of its inserts otherwise. + + // record new commit's details. + let batch = DBTransaction::new(); + { + let mut index = 0usize; + let mut last; + + while try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&now); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })).is_some() { + index += 1; + } + + let mut r = RlpStream::new_list(3); + r.append(id); + r.append(&self.inserts); + r.append(&self.removes); + try!(batch.put(&last, r.as_raw())); + + trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes); + + self.inserts.clear(); + self.removes.clear(); + + if self.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = Some(now); + } + } + + // apply old commits' details + if let Some((end_era, canon_id)) = end { + let mut index = 0usize; + let mut last; + while let Some(rlp_data) = { +// trace!(target: "rcdb", "checking for journal #{}.{}", end_era, index); + try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&end_era); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })) + } { + let rlp = Rlp::new(&rlp_data); + let our_id: H256 = rlp.val_at(0); + let to_remove: Vec = rlp.val_at(if canon_id == our_id {2} else {1}); + trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, index, our_id, canon_id, to_remove); + for i in &to_remove { + self.forward.remove(i); + } + try!(batch.delete(&last)); + index += 1; + } + } + + let r = try!(self.forward.commit_to_batch(&batch)); + try!(self.backing.write(batch)); + Ok(r) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use super::super::traits::JournalDB; + use hashdb::*; + + #[test] + fn long_history() { + // history is 3 + let mut jdb = RefCountedDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = RefCountedDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + 
fn fork() { + // history is 1 + let mut jdb = RefCountedDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } +} diff --git a/util/src/keys/directory.rs b/util/src/keys/directory.rs index d0d3393cd..a92bf4593 100644 --- a/util/src/keys/directory.rs +++ b/util/src/keys/directory.rs @@ -542,6 +542,8 @@ impl KeyDirectory { if removes.is_empty() { return; } let mut cache = self.cache.write().unwrap(); for key in removes { cache.remove(&key); } + + cache.shrink_to_fit(); } /// Reports how many keys are currently cached. diff --git a/util/src/keys/geth_import.rs b/util/src/keys/geth_import.rs index dbd9f0fe0..6c684c37d 100644 --- a/util/src/keys/geth_import.rs +++ b/util/src/keys/geth_import.rs @@ -161,6 +161,7 @@ mod tests { } #[test] + #[cfg(feature="heavy-tests")] fn can_decrypt_with_imported() { use keys::store::EncryptedHashMap; diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 6a5efc87d..78540bdb0 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -120,17 +120,37 @@ impl AccountProvider for AccountService { } } +impl Default for AccountService { + fn default() -> Self { + AccountService::new() + } +} + impl AccountService { /// New account service with the default location - pub fn new() -> AccountService { + pub fn new() -> Self { let secret_store = RwLock::new(SecretStore::new()); secret_store.write().unwrap().try_import_existing(); AccountService { secret_store: secret_store } } + + #[cfg(test)] + fn new_test(temp: &::devtools::RandomTempPath) -> Self { + let secret_store = RwLock::new(SecretStore::new_test(temp)); + AccountService { + secret_store: secret_store + } + } + + /// Ticks the account service + pub fn tick(&self) { + self.secret_store.write().unwrap().collect_garbage(); + } } + impl Default for SecretStore { fn default() -> Self { SecretStore::new() @@ -250,6 +270,20 @@ impl SecretStore { let unlock = try!(read_lock.get(account).ok_or(SigningError::AccountNotUnlocked)); Ok(unlock.secret as crypto::Secret) } + + /// Makes account unlocks expire and removes unused key files from memory + pub fn collect_garbage(&mut self) { + let mut garbage_lock = self.unlocks.write().unwrap(); + self.directory.collect_garbage(); + let utc = UTC::now(); + let expired_addresses = garbage_lock.iter() + .filter(|&(_, unlock)| unlock.expires < utc) + .map(|(address, _)| address.clone()).collect::>(); + + for expired in expired_addresses { garbage_lock.remove(&expired); } + + garbage_lock.shrink_to_fit(); + } } fn derive_key_iterations(password: &str, salt: &H256, c: u32) -> (Bytes, Bytes) { @@ -356,12 +390,11 @@ impl EncryptedHashMap for SecretStore { } -#[cfg(test)] +#[cfg(all(test, feature="heavy-tests"))] mod vector_tests { use super::{derive_mac,derive_key_iterations}; use common::*; - #[test] fn mac_vector() { let password = "testpassword"; @@ -388,6 +421,7 @@ mod tests { use devtools::*; use common::*; use crypto::KeyPair; + use chrono::*; #[test] fn can_insert() { @@ -464,6 +498,7 
@@ mod tests { } #[test] + #[cfg(feature="heavy-tests")] fn can_get() { let temp = RandomTempPath::create_dir(); let key_id = { @@ -568,9 +603,37 @@ mod tests { let temp = RandomTempPath::create_dir(); let mut sstore = SecretStore::new_test(&temp); let addr = sstore.new_account("test").unwrap(); - let _ok = sstore.unlock_account(&addr, "test").unwrap(); + sstore.unlock_account(&addr, "test").unwrap(); let secret = sstore.account_secret(&addr).unwrap(); let kp = KeyPair::from_secret(secret).unwrap(); assert_eq!(Address::from(kp.public().sha3()), addr); } + + #[test] + fn can_create_service() { + let temp = RandomTempPath::create_dir(); + let svc = AccountService::new_test(&temp); + assert!(svc.accounts().unwrap().is_empty()); + } + + #[test] + fn accounts_expire() { + use std::collections::hash_map::*; + + let temp = RandomTempPath::create_dir(); + let svc = AccountService::new_test(&temp); + let address = svc.new_account("pass").unwrap(); + svc.unlock_account(&address, "pass").unwrap(); + assert!(svc.account_secret(&address).is_ok()); + { + let ss_rw = svc.secret_store.write().unwrap(); + let mut ua_rw = ss_rw.unlocks.write().unwrap(); + let entry = ua_rw.entry(address); + if let Entry::Occupied(mut occupied) = entry { occupied.get_mut().expires = UTC::now() - Duration::minutes(1); } + } + + svc.tick(); + + assert!(svc.account_secret(&address).is_err()); + } } diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index fe65be6d1..a560c1a91 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -160,12 +160,12 @@ impl Connection { } } - /// Get socket token + /// Get socket token pub fn token(&self) -> StreamToken { self.token } - /// Replace socket token + /// Replace socket token pub fn set_token(&mut self, token: StreamToken) { self.token = token; } @@ -261,13 +261,13 @@ pub struct EncryptedConnection { } impl EncryptedConnection { - - /// Get socket token + + /// Get socket token pub fn token(&self) -> StreamToken { self.connection.token } - /// Replace socket token + /// Replace socket token pub fn set_token(&mut self, token: StreamToken) { self.connection.set_token(token); } @@ -513,8 +513,14 @@ mod tests { buf_size: usize, } + impl Default for TestSocket { + fn default() -> Self { + TestSocket::new() + } + } + impl TestSocket { - fn new() -> TestSocket { + fn new() -> Self { TestSocket { read_buffer: vec![], write_buffer: vec![], @@ -593,8 +599,14 @@ mod tests { type TestConnection = GenericConnection; + impl Default for TestConnection { + fn default() -> Self { + TestConnection::new() + } + } + impl TestConnection { - pub fn new() -> TestConnection { + pub fn new() -> Self { TestConnection { token: 999998888usize, socket: TestSocket::new(), @@ -609,8 +621,14 @@ mod tests { type TestBrokenConnection = GenericConnection; + impl Default for TestBrokenConnection { + fn default() -> Self { + TestBrokenConnection::new() + } + } + impl TestBrokenConnection { - pub fn new() -> TestBrokenConnection { + pub fn new() -> Self { TestBrokenConnection { token: 999998888usize, socket: TestBrokenSocket { error: "test broken socket".to_owned() }, diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index a381b49f8..6fda99cdb 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -18,7 +18,6 @@ use bytes::Bytes; use std::net::SocketAddr; use std::collections::{HashSet, HashMap, BTreeMap, VecDeque}; use std::mem; -use std::cmp; use std::default::Default; use mio::*; use mio::udp::*; @@ -407,27 +406,34 
@@ impl Discovery { let target: NodeId = try!(rlp.val_at(0)); let timestamp: u64 = try!(rlp.val_at(1)); try!(self.check_timestamp(timestamp)); - let limit = (MAX_DATAGRAM_SIZE - 109) / 90; let nearest = Discovery::nearest_node_entries(&target, &self.node_buckets); if nearest.is_empty() { return Ok(None); } - let mut rlp = RlpStream::new_list(1); - rlp.begin_list(cmp::min(limit, nearest.len())); - for n in 0 .. nearest.len() { - rlp.begin_list(4); - nearest[n].endpoint.to_rlp(&mut rlp); - rlp.append(&nearest[n].id); - if (n + 1) % limit == 0 || n == nearest.len() - 1 { - self.send_packet(PACKET_NEIGHBOURS, &from, &rlp.drain()); - trace!(target: "discovery", "Sent {} Neighbours to {:?}", n, &from); - rlp = RlpStream::new_list(1); - rlp.begin_list(cmp::min(limit, nearest.len() - n)); - } + let mut packets = Discovery::prepare_neighbours_packets(&nearest); + for p in packets.drain(..) { + self.send_packet(PACKET_NEIGHBOURS, &from, &p); } + trace!(target: "discovery", "Sent {} Neighbours to {:?}", nearest.len(), &from); Ok(None) } + fn prepare_neighbours_packets(nearest: &[NodeEntry]) -> Vec { + let limit = (MAX_DATAGRAM_SIZE - 109) / 90; + let chunks = nearest.chunks(limit); + let packets = chunks.map(|c| { + let mut rlp = RlpStream::new_list(1); + rlp.begin_list(c.len()); + for n in 0 .. c.len() { + rlp.begin_list(4); + c[n].endpoint.to_rlp(&mut rlp); + rlp.append(&c[n].id); + } + rlp.out() + }); + packets.collect() + } + fn on_neighbours(&mut self, rlp: &UntrustedRlp, _node: &NodeId, from: &SocketAddr) -> Result, NetworkError> { // TODO: validate packet let mut added = HashMap::new(); @@ -506,6 +512,24 @@ mod tests { use crypto::KeyPair; use std::str::FromStr; use rustc_serialize::hex::FromHex; + use rlp::*; + + #[test] + fn find_node() { + let mut nearest = Vec::new(); + let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@127.0.0.1:7770").unwrap(); + for _ in 0..1000 { + nearest.push( NodeEntry { id: node.id.clone(), endpoint: node.endpoint.clone() }); + } + + let packets = Discovery::prepare_neighbours_packets(&nearest); + assert_eq!(packets.len(), 77); + for p in &packets[0..76] { + assert!(p.len() > 1280/2); + assert!(p.len() <= 1280); + } + assert!(packets.last().unwrap().len() > 0); + } #[test] fn discovery() { diff --git a/util/src/network/host.rs b/util/src/network/host.rs index f63c75e9f..57aae51d7 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -541,7 +541,7 @@ impl Host where Message: Send + Sync + Clone { match TcpStream::connect(&address) { Ok(socket) => socket, Err(e) => { - warn!("Can't connect to address {:?}: {:?}", address, e); + debug!("Can't connect to address {:?}: {:?}", address, e); return; } } @@ -695,6 +695,14 @@ impl Host where Message: Send + Sync + Clone { return; } }; + if !originated { + let session_count = sessions.count(); + let ideal_peers = { self.info.read().unwrap().deref().config.ideal_peers }; + if session_count >= ideal_peers as usize { + session.disconnect(DisconnectReason::TooManyPeers); + return; + } + } let result = sessions.insert_with(move |session_token| { session.set_token(session_token); io.deregister_stream(token).expect("Error deleting handshake registration"); diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 7c9b6b04b..b5dec75e2 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -26,7 +26,7 @@ use std::ops::*; use std::sync::*; use std::env; use std::collections::HashMap; -use 
kvdb::{Database}; +use kvdb::{Database, DBTransaction}; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay. /// @@ -36,7 +36,7 @@ use kvdb::{Database}; /// /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` /// queries have an immediate effect in terms of these functions. -//#[derive(Clone)] +#[derive(Clone)] pub struct OverlayDB { overlay: MemoryDB, backing: Arc, @@ -58,6 +58,36 @@ impl OverlayDB { Self::new(Database::open_default(dir.to_str().unwrap()).unwrap()) } + /// Commit all operations to given batch. + pub fn commit_to_batch(&mut self, batch: &DBTransaction) -> Result { + let mut ret = 0u32; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc != 0 { + match self.payload(&key) { + Some(x) => { + let (back_value, back_rc) = x; + let total_rc: i32 = back_rc as i32 + rc; + if total_rc < 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); + } + deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0}; + } + None => { + if rc < 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); + } + self.put_payload_in_batch(batch, &key, (value, rc as u32)); + } + }; + ret += 1; + } + } + trace!("OverlayDB::commit() deleted {} nodes", deletes); + Ok(ret) + } + /// Commit all memory operations to the backing database. /// /// Returns either an error or the number of items changed in the backing database. @@ -96,13 +126,13 @@ impl OverlayDB { let (back_value, back_rc) = x; let total_rc: i32 = back_rc as i32 + rc; if total_rc < 0 { - return Err(From::from(BaseDataError::NegativelyReferencedHash)); + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); } deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; } None => { if rc < 0 { - return Err(From::from(BaseDataError::NegativelyReferencedHash)); + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); } self.put_payload(&key, (value, rc as u32)); } @@ -137,6 +167,9 @@ impl OverlayDB { /// ``` pub fn revert(&mut self) { self.overlay.clear(); } + /// Get the number of references that would be committed. + pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(&key).map_or(0, |&(_, refs)| refs) } + /// Get the refs and value of the given key. fn payload(&self, key: &H256) -> Option<(Bytes, u32)> { self.backing.get(&key.bytes()) @@ -147,6 +180,20 @@ impl OverlayDB { }) } + /// Put the refs and value of the given key, possibly deleting it from the db. + fn put_payload_in_batch(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool { + if payload.1 > 0 { + let mut s = RlpStream::new_list(2); + s.append(&payload.1); + s.append(&payload.0); + batch.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?"); + false + } else { + batch.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + true + } + } + /// Put the refs and value of the given key, possibly deleting it from the db. fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool { if payload.1 > 0 { diff --git a/util/src/rlp/rlpin.rs b/util/src/rlp/rlpin.rs index d58fa95e8..9d3fcb2fa 100644 --- a/util/src/rlp/rlpin.rs +++ b/util/src/rlp/rlpin.rs @@ -24,7 +24,7 @@ impl<'a> From> for Rlp<'a> { } /// Data-oriented view onto trusted rlp-slice. 
-/// +/// /// Unlikely to `UntrustedRlp` doesn't bother you with error /// handling. It assumes that you know what you are doing. #[derive(Debug)] @@ -44,7 +44,7 @@ impl<'a, 'view> View<'a, 'view> for Rlp<'a> where 'a: 'view { type Data = &'a [u8]; type Item = Rlp<'a>; type Iter = RlpIterator<'a, 'view>; - + /// Create a new instance of `Rlp` fn new(bytes: &'a [u8]) -> Rlp<'a> { Rlp { @@ -116,7 +116,7 @@ impl<'a, 'view> View<'a, 'view> for Rlp<'a> where 'a: 'view { impl <'a, 'view> Rlp<'a> where 'a: 'view { fn view_as_val(r: &R) -> T where R: View<'a, 'view>, T: RlpDecodable { let res: Result = r.as_val(); - res.unwrap_or_else(|_| panic!()) + res.unwrap_or_else(|e| panic!("DecodeError: {}", e)) } /// Decode into an object diff --git a/util/src/table.rs b/util/src/table.rs index 5ba572289..c3b2006cf 100644 --- a/util/src/table.rs +++ b/util/src/table.rs @@ -40,7 +40,7 @@ impl Default for Table } // There is default but clippy does not detect it? -#[allow(new_without_default)] +#[cfg_attr(feature="dev", allow(new_without_default))] impl Table where Row: Eq + Hash + Clone, Col: Eq + Hash {
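// A closing sketch of the neighbours-packet chunking that replaces the
// hand-rolled loop in the discovery.rs hunk above, reassembled with its
// stripped return type restored (`Vec<Bytes>`). Assuming MAX_DATAGRAM_SIZE
// is 1280, as the new find_node test's size bounds suggest, the limit works
// out to (1280 - 109) / 90 = 13 entries per packet, so 1000 nodes need
// ceil(1000 / 13) = 77 packets, matching the test's expectation.

fn prepare_neighbours_packets(nearest: &[NodeEntry]) -> Vec<Bytes> {
	// 109 bytes of fixed packet overhead, ~90 bytes per neighbour entry.
	let limit = (MAX_DATAGRAM_SIZE - 109) / 90;
	let chunks = nearest.chunks(limit);
	let packets = chunks.map(|c| {
		let mut rlp = RlpStream::new_list(1);
		rlp.begin_list(c.len());
		for n in 0..c.len() {
			// Each neighbour is a 4-item list: endpoint (3 fields) plus id.
			rlp.begin_list(4);
			c[n].endpoint.to_rlp(&mut rlp);
			rlp.append(&c[n].id);
		}
		rlp.out()
	});
	packets.collect()
}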