From e2e1d221d59ab58621583350a8910b0ee9a5a17e Mon Sep 17 00:00:00 2001
From: Afri Schoedon <5chdn@users.noreply.github.com>
Date: Mon, 10 Sep 2018 22:52:45 +0200
Subject: [PATCH] Beta backports to 2.0.4 (#9452)

* parity-version: bump beta to 2.0.4

* [light/jsonrpc] Provide the actual account for `eth_coinbase` RPC and
  unify error handling for light and full client (#9383)

* Provide the actual `account` for eth_coinbase

  The previous implementation always provided the `zero address` on the
  `eth_coinbase` RPC. Now the actual address is returned on success, or an
  error when no accounts are found.

* full client `eth_coinbase` return err

  In the full client, return an error when no account is found instead of
  returning the `zero address`.

* Remove needless blocks on single import

* Remove needless `static` lifetime on const

* Fix `rpc_eth_author` test

* parity: print correct keys path on startup (#9501)

* aura: don't report skipped primaries when empty steps are enabled (#9435)

* Only check warp syncing for eth_getWork (#9484)

* Only check warp syncing for eth_getWork

* Use SyncStatus::is_snapshot_syncing

* Fix Snapshot restoration failure on Windows (#9491)

* Close Blooms DB files before DB restoration

* PR Grumbles I

* PR Grumble

* Grumble
---
 Cargo.lock                                 |  12 +-
 Cargo.toml                                 |   2 +-
 ethcore/src/blockchain/blockchain.rs       |  16 +++
 ethcore/src/client/client.rs               |   4 +-
 ethcore/src/engines/authority_round/mod.rs |   6 +-
 parity/run.rs                              |   8 +-
 rpc/src/v1/impls/eth.rs                    |  18 ++-
 rpc/src/v1/impls/light/eth.rs              |   6 +-
 rpc/src/v1/tests/mocked/eth.rs             |  12 +-
 util/blooms-db/src/db.rs                   | 157 ++++++++++++++++-----
 util/blooms-db/src/lib.rs                  |   5 +
 util/dir/src/lib.rs                        |   4 +-
 util/version/Cargo.toml                    |   2 +-
 13 files changed, 182 insertions(+), 70 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 7052145e3..709abec02 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1930,7 +1930,7 @@ source = "git+https://github.com/paritytech/parity-common#0045887fecd2fec39e56c9
 name = "parity-clib"
 version = "1.12.0"
 dependencies = [
- "parity-ethereum 2.0.3",
+ "parity-ethereum 2.0.4",
 ]
 
 [[package]]
@@ -1947,7 +1947,7 @@ dependencies = [
 
 [[package]]
 name = "parity-ethereum"
-version = "2.0.3"
+version = "2.0.4"
 dependencies = [
  "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "atty 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1996,7 +1996,7 @@ dependencies = [
  "parity-rpc 1.12.0",
  "parity-rpc-client 1.4.0",
  "parity-updater 1.12.0",
- "parity-version 2.0.3",
+ "parity-version 2.0.4",
  "parity-whisper 0.1.0",
  "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "path 0.1.1 (git+https://github.com/paritytech/parity-common)",
@@ -2135,7 +2135,7 @@ dependencies = [
  "parity-crypto 0.1.0 (git+https://github.com/paritytech/parity-common)",
  "parity-reactor 0.1.0",
  "parity-updater 1.12.0",
- "parity-version 2.0.3",
+ "parity-version 2.0.4",
  "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "patricia-trie 0.2.1 (git+https://github.com/paritytech/parity-common)",
  "plain_hasher 0.1.0 (git+https://github.com/paritytech/parity-common)",
@@ -2206,7 +2206,7 @@ dependencies = [
  "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)",
  "parity-hash-fetch 1.12.0",
- "parity-version 2.0.3",
+ "parity-version 2.0.4",
  "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "path 0.1.1 (git+https://github.com/paritytech/parity-common)",
  "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2217,7 +2217,7 @@ dependencies = [
 
 [[package]]
 name = "parity-version"
-version = "2.0.3"
+version = "2.0.4"
 dependencies = [
  "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)",
  "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)",
diff --git a/Cargo.toml b/Cargo.toml
index 10cafc7a5..38245fd39 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,7 +2,7 @@
 description = "Parity Ethereum client"
 name = "parity-ethereum"
 # NOTE Make sure to update util/version/Cargo.toml as well
-version = "2.0.3"
+version = "2.0.4"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]
diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index b2ba886b4..5eb46fecf 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -35,6 +35,7 @@ use encoded;
 use engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition};
 use engines::ForkChoice;
 use ethereum_types::{H256, Bloom, BloomRef, U256};
+use error::Error as EthcoreError;
 use header::*;
 use heapsize::HeapSizeOf;
 use itertools::Itertools;
@@ -60,6 +61,21 @@ pub trait BlockChainDB: Send + Sync {
 
 	/// Trace blooms database.
 	fn trace_blooms(&self) -> &blooms_db::Database;
+
+	/// Restore the DB from the given path
+	fn restore(&self, new_db: &str) -> Result<(), EthcoreError> {
+		// First, close the Blooms databases
+		self.blooms().close()?;
+		self.trace_blooms().close()?;
+
+		// Restore the key_value DB
+		self.key_value().restore(new_db)?;
+
+		// Re-open the Blooms databases
+		self.blooms().reopen()?;
+		self.trace_blooms().reopen()?;
+		Ok(())
+	}
 }
 
 /// Generic database handler. This trait contains one function `open`. When called, it opens database with a
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index 1052403a8..9e72eb36b 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -1296,9 +1296,7 @@ impl snapshot::DatabaseRestore for Client {
 		let mut tracedb = self.tracedb.write();
 		self.importer.miner.clear();
 		let db = self.db.write();
-		db.key_value().restore(new_db)?;
-		db.blooms().reopen()?;
-		db.trace_blooms().reopen()?;
+		db.restore(new_db)?;
 
 		let cache_size = state_db.cache_size();
 		*state_db = StateDB::new(journaldb::new(db.key_value().clone(), self.pruning, ::db::COL_STATE), cache_size);
diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs
index e32d213f0..582045377 100644
--- a/ethcore/src/engines/authority_round/mod.rs
+++ b/ethcore/src/engines/authority_round/mod.rs
@@ -984,8 +984,10 @@ impl Engine for AuthorityRound {
 		self.clear_empty_steps(parent_step);
 
 		// report any skipped primaries between the parent block and
-		// the block we're sealing
-		self.report_skipped(header, step, u64::from(parent_step) as usize, &*validators, set_number);
+		// the block we're sealing, unless we have empty steps enabled
+		if header.number() < self.empty_steps_transition {
+			self.report_skipped(header, step, u64::from(parent_step) as usize, &*validators, set_number);
+		}
 
 		let mut fields = vec![
 			encode(&step).into_vec(),
diff --git a/parity/run.rs b/parity/run.rs
index f5f477379..73324aa08 100644
--- a/parity/run.rs
+++ b/parity/run.rs
@@ -185,7 +185,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc) -> Result(cmd: RunCmd, logger: Arc, on_client_rq:
 	}
 
 	//print out running parity environment
-	print_running_environment(&spec.name, &cmd.dirs, &db_dirs);
+	print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs);
 
 	// display info about used pruning algorithm
 	info!("State DB configuration: {}{}{}",
@@ -926,9 +926,9 @@ fn daemonize(_pid_file: String) -> Result<(), String> {
 	Err("daemon is no supported on windows".into())
 }
 
-fn print_running_environment(spec_name: &String, dirs: &Directories, db_dirs: &DatabaseDirectories) {
+fn print_running_environment(data_dir: &str, dirs: &Directories, db_dirs: &DatabaseDirectories) {
 	info!("Starting {}", Colour::White.bold().paint(version()));
-	info!("Keys path {}", Colour::White.bold().paint(dirs.keys_path(spec_name).to_string_lossy().into_owned()));
+	info!("Keys path {}", Colour::White.bold().paint(dirs.keys_path(data_dir).to_string_lossy().into_owned()));
 	info!("DB path {}", Colour::White.bold().paint(db_dirs.db_root_path().to_string_lossy().into_owned()));
 }
diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs
index 3505709cb..5717dd70d 100644
--- a/rpc/src/v1/impls/eth.rs
+++ b/rpc/src/v1/impls/eth.rs
@@ -33,7 +33,7 @@ use ethcore::log_entry::LogEntry;
 use ethcore::miner::{self, MinerService};
 use ethcore::snapshot::SnapshotService;
 use ethcore::encoded;
-use sync::{SyncProvider};
+use sync::SyncProvider;
 use miner::external::ExternalMinerService;
 use transaction::{SignedTransaction, LocalizedTransaction};
 
@@ -52,7 +52,7 @@ use v1::types::{
 };
 use v1::metadata::Metadata;
 
-const EXTRA_INFO_PROOF: &'static str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed";
+const EXTRA_INFO_PROOF: &str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed";
 
 /// Eth RPC options
 pub struct EthClientOptions {
@@ -502,12 +502,16 @@ impl Eth for EthClient<
 	}
 
 	fn author(&self) -> Result<RpcH160> {
-		let mut miner = self.miner.authoring_params().author;
+		let miner = self.miner.authoring_params().author;
 		if miner == 0.into() {
-			miner = self.accounts.accounts().ok().and_then(|a| a.get(0).cloned()).unwrap_or_default();
+			self.accounts.accounts()
+				.ok()
+				.and_then(|a| a.first().cloned())
+				.map(From::from)
+				.ok_or_else(|| errors::account("No accounts were found", ""))
+		} else {
+			Ok(RpcH160::from(miner))
 		}
-
-		Ok(RpcH160::from(miner))
 	}
 
 	fn is_mining(&self) -> Result<bool> {
@@ -737,7 +741,7 @@ impl Eth for EthClient<
 		let queue_info = self.client.queue_info();
 		let total_queue_size = queue_info.total_queue_size();
 
-		if is_major_importing(Some(sync_status.state), queue_info) || total_queue_size > MAX_QUEUE_SIZE_TO_MINE_ON {
+		if sync_status.is_snapshot_syncing() || total_queue_size > MAX_QUEUE_SIZE_TO_MINE_ON {
			trace!(target: "miner", "Syncing. Cannot give any work.");
 			return Err(errors::no_work());
 		}
 	}
diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs
index 74e5e296e..1f6a54da1 100644
--- a/rpc/src/v1/impls/light/eth.rs
+++ b/rpc/src/v1/impls/light/eth.rs
@@ -253,7 +253,11 @@ impl Eth for EthClient {
 	}
 
 	fn author(&self) -> Result<RpcH160> {
-		Ok(Default::default())
+		self.accounts.accounts()
+			.ok()
+			.and_then(|a| a.first().cloned())
+			.map(From::from)
+			.ok_or_else(|| errors::account("No accounts were found", ""))
 	}
 
 	fn is_mining(&self) -> Result<bool> {
diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs
index 602621194..8accddd56 100644
--- a/rpc/src/v1/tests/mocked/eth.rs
+++ b/rpc/src/v1/tests/mocked/eth.rs
@@ -350,25 +350,27 @@ fn rpc_eth_author() {
 	let make_res = |addr| r#"{"jsonrpc":"2.0","result":""#.to_owned() + &format!("0x{:x}", addr) + r#"","id":1}"#;
 	let tester = EthTester::default();
 
-	let req = r#"{
+	let request = r#"{
 		"jsonrpc": "2.0",
 		"method": "eth_coinbase",
 		"params": [],
 		"id": 1
 	}"#;
 
-	// No accounts - returns zero
-	assert_eq!(tester.io.handle_request_sync(req), Some(make_res(Address::zero())));
+	let response = r#"{"jsonrpc":"2.0","error":{"code":-32023,"message":"No accounts were found","data":"\"\""},"id":1}"#;
+
+	// No accounts - returns an error indicating that no accounts were found
+	assert_eq!(tester.io.handle_request_sync(request), Some(response.to_string()));
 
 	// Account set - return first account
 	let addr = tester.accounts_provider.new_account(&"123".into()).unwrap();
-	assert_eq!(tester.io.handle_request_sync(req), Some(make_res(addr)));
+	assert_eq!(tester.io.handle_request_sync(request), Some(make_res(addr)));
 
 	for i in 0..20 {
 		let addr = tester.accounts_provider.new_account(&format!("{}", i).into()).unwrap();
 		tester.miner.set_author(addr.clone(), None).unwrap();
-		assert_eq!(tester.io.handle_request_sync(req), Some(make_res(addr)));
+		assert_eq!(tester.io.handle_request_sync(request), Some(make_res(addr)));
 	}
 }
diff --git a/util/blooms-db/src/db.rs b/util/blooms-db/src/db.rs
index faeb038f2..3adedeaf1 100644
--- a/util/blooms-db/src/db.rs
+++ b/util/blooms-db/src/db.rs
@@ -14,13 +14,17 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::{io, fmt};
+use std::{error, io, fmt};
 use std::path::{Path, PathBuf};
 
 use ethbloom;
 
 use file::{File, FileIterator};
 
+fn other_io_err<E>(e: E) -> io::Error where E: Into<Box<error::Error + Send + Sync>> {
+	io::Error::new(io::ErrorKind::Other, e)
+}
+
 /// Bloom positions in database files.
 #[derive(Debug)]
 struct Positions {
@@ -39,8 +43,14 @@ impl Positions {
 	}
 }
 
-/// Blooms database.
-pub struct Database {
+struct DatabaseFilesIterator<'a> {
+	pub top: FileIterator<'a>,
+	pub mid: FileIterator<'a>,
+	pub bot: FileIterator<'a>,
+}
+
+/// Blooms database files.
+struct DatabaseFiles {
 	/// Top level bloom file
 	///
 	/// Every bloom represents 16 blooms on mid level
 	top: File,
@@ -53,6 +63,52 @@ pub struct Database {
 	///
 	/// Every bloom is an ethereum header bloom
 	bot: File,
+}
+
+impl DatabaseFiles {
+	/// Open the blooms db files
+	pub fn open(path: &Path) -> io::Result<DatabaseFiles> {
+		Ok(DatabaseFiles {
+			top: File::open(path.join("top.bdb"))?,
+			mid: File::open(path.join("mid.bdb"))?,
+			bot: File::open(path.join("bot.bdb"))?,
+		})
+	}
+
+	pub fn accrue_bloom(&mut self, pos: Positions, bloom: ethbloom::BloomRef) -> io::Result<()> {
+		self.top.accrue_bloom::<ethbloom::BloomRef>(pos.top, bloom)?;
+		self.mid.accrue_bloom::<ethbloom::BloomRef>(pos.mid, bloom)?;
+		self.bot.replace_bloom::<ethbloom::BloomRef>(pos.bot, bloom)?;
+		Ok(())
+	}
+
+	pub fn iterator_from(&mut self, pos: Positions) -> io::Result<DatabaseFilesIterator> {
+		Ok(DatabaseFilesIterator {
+			top: self.top.iterator_from(pos.top)?,
+			mid: self.mid.iterator_from(pos.mid)?,
+			bot: self.bot.iterator_from(pos.bot)?,
+		})
+	}
+
+	fn flush(&mut self) -> io::Result<()> {
+		self.top.flush()?;
+		self.mid.flush()?;
+		self.bot.flush()?;
+		Ok(())
+	}
+}
+
+impl Drop for DatabaseFiles {
+	/// Flush the database files on drop
+	fn drop(&mut self) {
+		self.flush().ok();
+	}
+}
+
+/// Blooms database.
+pub struct Database {
+	/// Database files
+	db_files: Option<DatabaseFiles>,
 	/// Database path
 	path: PathBuf,
 }
@@ -60,61 +116,71 @@ pub struct Database {
 impl Database {
 	/// Opens blooms database.
 	pub fn open<P>(path: P) -> io::Result<Database> where P: AsRef<Path> {
-		let path = path.as_ref();
+		let path: PathBuf = path.as_ref().to_path_buf();
 		let database = Database {
-			top: File::open(path.join("top.bdb"))?,
-			mid: File::open(path.join("mid.bdb"))?,
-			bot: File::open(path.join("bot.bdb"))?,
-			path: path.to_owned(),
+			db_files: Some(DatabaseFiles::open(&path)?),
+			path: path,
 		};
 
 		Ok(database)
 	}
 
-	/// Reopens the database at the same location.
-	pub fn reopen(&mut self) -> io::Result<()> {
-		self.top = File::open(self.path.join("top.bdb"))?;
-		self.mid = File::open(self.path.join("mid.bdb"))?;
-		self.bot = File::open(self.path.join("bot.bdb"))?;
+	/// Close the inner-files
+	pub fn close(&mut self) -> io::Result<()> {
+		self.db_files = None;
 		Ok(())
 	}
 
-	/// Insert consecutive blooms into database starting with positon from.
+	/// Reopens the database at the same location.
+	pub fn reopen(&mut self) -> io::Result<()> {
+		self.db_files = Some(DatabaseFiles::open(&self.path)?);
+		Ok(())
+	}
+
+	/// Insert consecutive blooms into database starting at the given position.
 	pub fn insert_blooms<'a, I, B>(&mut self, from: u64, blooms: I) -> io::Result<()>
 	where ethbloom::BloomRef<'a>: From<B>, I: Iterator<Item = B> {
-		for (index, bloom) in (from..).into_iter().zip(blooms.map(Into::into)) {
-			let pos = Positions::from_index(index);
+		match self.db_files {
+			Some(ref mut db_files) => {
+				for (index, bloom) in (from..).into_iter().zip(blooms.map(Into::into)) {
+					let pos = Positions::from_index(index);
 
-			// constant forks make lead to increased ration of false positives in bloom filters
-			// since we do not rebuild top or mid level, but we should not be worried about that
-			// most of the time events at block n(a) occur also on block n(b) or n+1(b)
-			self.top.accrue_bloom::<ethbloom::BloomRef>(pos.top, bloom)?;
-			self.mid.accrue_bloom::<ethbloom::BloomRef>(pos.mid, bloom)?;
-			self.bot.replace_bloom::<ethbloom::BloomRef>(pos.bot, bloom)?;
+					// Constant forks may lead to increased ratio of false positives in bloom filters
+					// since we do not rebuild top or mid level, but we should not be worried about that
+					// because most of the time events at block n(a) occur also on block n(b) or n+1(b)
+					db_files.accrue_bloom(pos, bloom)?;
+				}
+				db_files.flush()?;
+				Ok(())
+			},
+			None => Err(other_io_err("Database is closed")),
 		}
-		self.top.flush()?;
-		self.mid.flush()?;
-		self.bot.flush()
 	}
 
 	/// Returns an iterator yielding all indexes containing given bloom.
 	pub fn iterate_matching<'a, 'b, B, I, II>(&'a mut self, from: u64, to: u64, blooms: II) -> io::Result<DatabaseIterator<'a, II>>
 	where ethbloom::BloomRef<'b>: From<B>, 'b: 'a, II: IntoIterator<Item = B, IntoIter = I> + Copy, I: Iterator<Item = B> {
-		let index = from / 256 * 256;
-		let pos = Positions::from_index(index);
+		match self.db_files {
+			Some(ref mut db_files) => {
+				let index = from / 256 * 256;
+				let pos = Positions::from_index(index);
+				let files_iter = db_files.iterator_from(pos)?;
 
-		let iter = DatabaseIterator {
-			top: self.top.iterator_from(pos.top)?,
-			mid: self.mid.iterator_from(pos.mid)?,
-			bot: self.bot.iterator_from(pos.bot)?,
-			state: IteratorState::Top,
-			from,
-			to,
-			index,
-			blooms,
-		};
+				let iter = DatabaseIterator {
+					top: files_iter.top,
+					mid: files_iter.mid,
+					bot: files_iter.bot,
+					state: IteratorState::Top,
+					from,
+					to,
+					index,
+					blooms,
+				};
 
-		Ok(iter)
+				Ok(iter)
+			},
+			None => Err(other_io_err("Database is closed")),
+		}
 	}
 }
@@ -285,4 +351,19 @@ mod tests {
 		let matches = database.iterate_matching(256, 257, Some(&Bloom::from(0x10))).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
 		assert_eq!(matches, vec![256, 257]);
 	}
+
+	#[test]
+	fn test_db_close() {
+		let tempdir = TempDir::new("").unwrap();
+		let blooms = vec![Bloom::from(0x100), Bloom::from(0x01), Bloom::from(0x10), Bloom::from(0x11)];
+		let mut database = Database::open(tempdir.path()).unwrap();
+
+		// Close the DB and ensure inserting blooms errors
+		database.close().unwrap();
+		assert!(database.insert_blooms(254, blooms.iter()).is_err());
+
+		// Reopen it and ensure inserting blooms is OK
+		database.reopen().unwrap();
+		assert!(database.insert_blooms(254, blooms.iter()).is_ok());
+	}
 }
diff --git a/util/blooms-db/src/lib.rs b/util/blooms-db/src/lib.rs
index c63815422..a810b9732 100644
--- a/util/blooms-db/src/lib.rs
+++ b/util/blooms-db/src/lib.rs
@@ -54,6 +54,11 @@ impl Database {
 		Ok(result)
 	}
 
+	/// Closes the inner database
+	pub fn close(&self) -> io::Result<()> {
+		self.database.lock().close()
+	}
+
 	/// Reopens database at the same location.
 	pub fn reopen(&self) -> io::Result<()> {
 		self.database.lock().reopen()
diff --git a/util/dir/src/lib.rs b/util/dir/src/lib.rs
index aac672b1f..b6b9bdbf4 100644
--- a/util/dir/src/lib.rs
+++ b/util/dir/src/lib.rs
@@ -128,9 +128,9 @@ impl Directories {
 	}
 
 	/// Get the keys path
-	pub fn keys_path(&self, spec_name: &str) -> PathBuf {
+	pub fn keys_path(&self, data_dir: &str) -> PathBuf {
 		let mut dir = PathBuf::from(&self.keys);
-		dir.push(spec_name);
+		dir.push(data_dir);
 		dir
 	}
 }
diff --git a/util/version/Cargo.toml b/util/version/Cargo.toml
index 7dc624a19..9cb345576 100644
--- a/util/version/Cargo.toml
+++ b/util/version/Cargo.toml
@@ -3,7 +3,7 @@
 [package]
 name = "parity-version"
 # NOTE: this value is used for Parity version string (via env CARGO_PKG_VERSION)
-version = "2.0.3"
+version = "2.0.4"
 authors = ["Parity Technologies <admin@parity.io>"]
 build = "build.rs"
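
Note (outside the patch itself): with #9383 both the light and the full client resolve `eth_coinbase` to the configured author or, failing that, to the first unlocked account, and answer with the "No accounts were found" error instead of the zero address when neither exists. The standalone Rust sketch below only mirrors that selection logic with deliberately simplified types (plain strings instead of `H160`, a made-up `coinbase` helper); it is an illustration of the new behaviour, not code from the tree.

// Simplified model of the new eth_coinbase fallback: explicit author first,
// then the first unlocked account, otherwise an error (never the zero address).
fn coinbase(configured_author: Option<&str>, accounts: &[&str]) -> Result<String, String> {
	match configured_author {
		Some(addr) => Ok(addr.to_string()),
		None => accounts
			.first()
			.map(|a| a.to_string())
			.ok_or_else(|| "No accounts were found".to_string()),
	}
}

fn main() {
	assert_eq!(coinbase(Some("0x00aa"), &[]), Ok("0x00aa".to_string()));
	assert_eq!(coinbase(None, &["0x00bb"]), Ok("0x00bb".to_string()));
	assert!(coinbase(None, &[]).is_err());
}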
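A further note, also outside the patch: the new `BlockChainDB::restore` default method closes both blooms databases before `key_value().restore(new_db)` swaps the database directory and only reopens them afterwards. The likely reason this matters on Windows is that open file handles block deleting or replacing the files behind them. The self-contained sketch below uses hypothetical paths, a made-up `swap_in_new_db` helper and plain `std::fs` instead of parity's database code, purely to demonstrate the same close-before-replace pattern under those assumptions.

use std::fs::{self, File};
use std::io::{self, Write};
use std::path::Path;

// Hypothetical helper: replace `db_dir` with the freshly restored copy in `new_db_dir`.
// On Windows this only succeeds if no handle into `db_dir` is still open, which is
// why the patch closes the blooms files before restoring and reopens them after.
fn swap_in_new_db(db_dir: &Path, new_db_dir: &Path) -> io::Result<()> {
	if db_dir.exists() {
		fs::remove_dir_all(db_dir)?; // fails on Windows while files inside are open
	}
	fs::rename(new_db_dir, db_dir)?; // move the restored database into place
	Ok(())
}

fn main() -> io::Result<()> {
	let db_dir = Path::new("db");
	let new_db_dir = Path::new("db.restored");
	fs::create_dir_all(db_dir)?;
	fs::create_dir_all(new_db_dir)?;

	// 1. Open a blooms file, as the running client would.
	let mut blooms = File::create(db_dir.join("top.bdb"))?;
	blooms.write_all(b"blooms")?;

	// 2. Close it *before* touching the directory (the step this patch adds).
	drop(blooms);

	// 3. Now the directory swap can succeed on every platform.
	swap_in_new_db(db_dir, new_db_dir)?;

	// 4. Recreate the blooms file afterwards, as `restore` does via `reopen()`.
	File::create(db_dir.join("top.bdb"))?;
	Ok(())
}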