diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..f679363b8
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+# Contributing to Parity
+
+## License
+
+By contributing to Parity, you agree that your contributions will be
+licensed under the [BSD License](LICENSE).
+
+At the top of every source code file you alter, after the initial
+licence section, please append a second section that reads:
+
+Portions contributed by YOUR NAME are hereby placed under the BSD licence.
+
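For illustration only (not part of the patch), the per-file notice requested above might sit like this at the top of an altered Rust source file, immediately after the existing licence header; the first comment line is a placeholder, and only the wording of the second comes from CONTRIBUTING.md:

	// ... existing licence section of the file, left unchanged ...
	// Portions contributed by YOUR NAME are hereby placed under the BSD licence.
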
diff --git a/Cargo.toml b/Cargo.toml
index fb52d14d5..ca2ad9c6c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@ rustc-serialize = "0.3"
 docopt = "0.6"
 docopt_macros = "0.6"
 ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" }
-clippy = "0.0.37"
+clippy = "0.0.41"
 ethcore-util = { path = "util" }
 ethcore = { path = "ethcore" }
 ethsync = { path = "sync" }
diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml
index be7652e17..90d147a02 100644
--- a/ethcore/Cargo.toml
+++ b/ethcore/Cargo.toml
@@ -18,7 +18,7 @@ ethcore-util = { path = "../util" }
 evmjit = { path = "../evmjit", optional = true }
 ethash = { path = "../ethash" }
 num_cpus = "0.2"
-clippy = "0.0.37"
+clippy = "0.0.41"
 crossbeam = "0.1.5"
 lazy_static = "0.1"
diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs
index c7a5ec268..d7fcdbc30 100644
--- a/ethcore/src/client.rs
+++ b/ethcore/src/client.rs
@@ -182,7 +182,7 @@ pub struct Client {
 }
 
 const HISTORY: u64 = 1000;
-const CLIENT_DB_VER_STR: &'static str = "2.0";
+const CLIENT_DB_VER_STR: &'static str = "2.1";
 
 impl Client {
 	/// Create a new client with given spec and DB path.
@@ -319,12 +319,11 @@ impl Client {
 			self.report.write().unwrap().accrue_block(&block);
 			trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
 			ret += 1;
-
-			if self.block_queue.read().unwrap().queue_info().is_empty() {
-				io.send(NetworkIoMessage::User(SyncMessage::BlockVerified)).unwrap();
-			}
 		}
 		self.block_queue.write().unwrap().mark_as_good(&good_blocks);
+		if !good_blocks.is_empty() && self.block_queue.read().unwrap().queue_info().is_empty() {
+			io.send(NetworkIoMessage::User(SyncMessage::BlockVerified)).unwrap();
+		}
 		ret
 	}
@@ -353,12 +352,12 @@ impl Client {
 		self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size);
 	}
 
-	fn block_hash(&self, id: BlockId) -> Option<H256> {
+	fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
 		match id {
 			BlockId::Hash(hash) => Some(hash),
-			BlockId::Number(number) => self.chain.read().unwrap().block_hash(number),
-			BlockId::Earliest => self.chain.read().unwrap().block_hash(0),
-			BlockId::Latest => Some(self.chain.read().unwrap().best_block_hash())
+			BlockId::Number(number) => chain.block_hash(number),
+			BlockId::Earliest => chain.block_hash(0),
+			BlockId::Latest => Some(chain.best_block_hash())
 		}
 	}
@@ -374,12 +373,14 @@ impl Client {
 
 impl BlockChainClient for Client {
	fn block_header(&self, id: BlockId) -> Option<Bytes> {
-		self.block_hash(id).and_then(|hash| self.chain.read().unwrap().block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
+		let chain = self.chain.read().unwrap();
+		Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
 	}
 
 	fn block_body(&self, id: BlockId) -> Option<Bytes> {
-		self.block_hash(id).and_then(|hash| {
-			self.chain.read().unwrap().block(&hash).map(|bytes| {
+		let chain = self.chain.read().unwrap();
+		Self::block_hash(&chain, id).and_then(|hash| {
+			chain.block(&hash).map(|bytes| {
 				let rlp = Rlp::new(&bytes);
 				let mut body = RlpStream::new_list(2);
 				body.append_raw(rlp.at(1).as_raw(), 1);
@@ -390,21 +391,24 @@ impl BlockChainClient for Client {
 	}
 
 	fn block(&self, id: BlockId) -> Option<Bytes> {
-		self.block_hash(id).and_then(|hash| {
-			self.chain.read().unwrap().block(&hash)
+		let chain = self.chain.read().unwrap();
+		Self::block_hash(&chain, id).and_then(|hash| {
+			chain.block(&hash)
 		})
 	}
 
 	fn block_status(&self, id: BlockId) -> BlockStatus {
-		match self.block_hash(id) {
-			Some(ref hash) if self.chain.read().unwrap().is_known(hash) => BlockStatus::InChain,
+		let chain = self.chain.read().unwrap();
+		match Self::block_hash(&chain, id) {
+			Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
 			Some(hash) => self.block_queue.read().unwrap().block_status(&hash),
 			None => BlockStatus::Unknown
 		}
 	}
 
 	fn block_total_difficulty(&self, id: BlockId) -> Option<U256> {
-		self.block_hash(id).and_then(|hash| self.chain.read().unwrap().block_details(&hash)).map(|d| d.total_difficulty)
+		let chain = self.chain.read().unwrap();
+		Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
 	}
 
 	fn code(&self, address: &Address) -> Option<Bytes> {
@@ -412,13 +416,14 @@ impl BlockChainClient for Client {
 	}
 
 	fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
+		let chain = self.chain.read().unwrap();
 		match id {
-			TransactionId::Hash(ref hash) => self.chain.read().unwrap().transaction_address(hash),
-			TransactionId::Location(id, index) => self.block_hash(id).map(|hash| TransactionAddress {
+			TransactionId::Hash(ref hash) => chain.transaction_address(hash),
+			TransactionId::Location(id, index) => Self::block_hash(&chain, id).map(|hash| TransactionAddress {
 				block_hash: hash,
 				index: index
 			})
-		}.and_then(|address| self.chain.read().unwrap().transaction(&address))
+		}.and_then(|address| chain.transaction(&address))
 	}
 
 	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs
index 11c20ddbe..0d1dcd8d5 100644
--- a/ethcore/src/ethereum/mod.rs
+++ b/ethcore/src/ethereum/mod.rs
@@ -24,7 +24,7 @@ pub mod ethash;
 /// Export the denominations module.
 pub mod denominations;
 
-pub use self::ethash::*;
+pub use self::ethash::{Ethash};
 pub use self::denominations::*;
 
 use super::spec::*;
diff --git a/install-deps.sh b/install-deps.sh
index 28a442040..774d18720 100755
--- a/install-deps.sh
+++ b/install-deps.sh
@@ -688,7 +688,7 @@ function run_installer()
 		info "- Run tests with:"
 		info "    ${b}cargo test --release --features ethcore/json-tests -p ethcore${reset}"
 		info "- Install the client with:"
-		info "    ${b}sudo cp parity/target/release/parity${reset}"
+		info "    ${b}sudo cp parity/target/release/parity${reset} /usr/local/bin"
 		echo
 	}
diff --git a/parity/main.rs b/parity/main.rs
index 5627f87f0..62b73ca47 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -55,17 +55,19 @@ Parity. Ethereum Client.
 Copyright 2015, 2016 Ethcore (UK) Limited
 
 Usage:
-  parity [options] [ <enode>... ]
+  parity [options] [ --no-bootstrap | <enode>... ]
 
 Options:
   --chain CHAIN            Specify the blockchain type. CHAIN may be either a JSON chain specification file
                            or frontier, mainnet, morden, or testnet [default: frontier].
  -d --db-path PATH         Specify the database & configuration directory path [default: $HOME/.parity]
+  --no-bootstrap           Don't bother trying to connect to any nodes initially.
  --listen-address URL      Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304].
  --public-address URL      Specify the IP/port on which peers may connect [default: 0.0.0.0:30304].
  --address URL            Equivalent to --listen-address URL --public-address URL.
  --upnp                   Use UPnP to try to figure out the correct network settings.
+  --node-key KEY           Specify node secret key as hex string.
  --cache-pref-size BYTES   Specify the prefered size of the blockchain cache in bytes [default: 16384].
  --cache-max-size BYTES    Specify the maximum size of the blockchain cache in bytes [default: 262144].
@@ -76,7 +78,7 @@ Options:
  -l --logging LOGGING      Specify the logging level.
  -v --version              Show information about version.
  -h --help                 Show this screen.
-", flag_cache_pref_size: usize, flag_cache_max_size: usize, flag_address: Option<String>);
+", flag_cache_pref_size: usize, flag_cache_max_size: usize, flag_address: Option<String>, flag_node_key: Option<String>);
 
 fn setup_log(init: &str) {
 	let mut builder = LogBuilder::new();
@@ -144,9 +146,11 @@ impl Configuration {
 	}
 
 	fn init_nodes(&self, spec: &Spec) -> Vec<String> {
-		match self.args.arg_enode.len() {
-			0 => spec.nodes().clone(),
-			_ => self.args.arg_enode.clone(),
+		if self.args.flag_no_bootstrap { Vec::new() } else {
+			match self.args.arg_enode.len() {
+				0 => spec.nodes().clone(),
+				_ => self.args.arg_enode.clone(),
+			}
 		}
 	}
@@ -202,6 +206,7 @@ fn main() {
 	let (listen, public) = conf.net_addresses();
 	net_settings.listen_address = listen;
 	net_settings.public_address = public;
+	net_settings.use_secret = conf.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).expect("Invalid key string"));
 
 	// Build client
 	let mut service = ClientService::start(spec, net_settings, &Path::new(&conf.path())).unwrap();
@@ -256,7 +261,7 @@ impl Informant {
 		let sync_info = sync.status();
 
 		if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) {
-			println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, {} downloaded, {}+{} queued ···// {} ({}) bl {} ({}) ex ]",
+			println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// {} ({}) bl {} ({}) ex ]",
 				chain_info.best_block_number,
 				chain_info.best_block_hash,
 				(report.blocks_imported - last_report.blocks_imported) / dur,
@@ -265,7 +270,7 @@
 				sync_info.num_active_peers,
 				sync_info.num_peers,
-				sync_info.blocks_received,
+				sync_info.last_imported_block_number.unwrap_or(chain_info.best_block_number),
 				queue_info.unverified_queue_size,
 				queue_info.verified_queue_size,
diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml
index 66688466c..34da585f7 100644
--- a/rpc/Cargo.toml
+++ b/rpc/Cargo.toml
@@ -16,6 +16,6 @@ jsonrpc-http-server = "1.1"
 ethcore-util = { path = "../util" }
 ethcore = { path = "../ethcore" }
 ethsync = { path = "../sync" }
-clippy = "0.0.37"
+clippy = "0.0.41"
 target_info = "0.1.0"
 rustc-serialize = "0.3"
diff --git a/sync/Cargo.toml b/sync/Cargo.toml
index 75853e0ab..8eb6a1bee 100644
--- a/sync/Cargo.toml
+++ b/sync/Cargo.toml
@@ -10,7 +10,7 @@ authors = ["Ethcore <admin@ethcore.io>"]
-clippy = "0.0.37"
+clippy = "0.0.41"
diff --git a/sync/src/chain.rs b/sync/src/chain.rs
--- a/sync/src/chain.rs
+++ b/sync/src/chain.rs
-			if needed_bodies.len() > MAX_BODIES_TO_REQUEST {
+			if needed_bodies.len() >= MAX_BODIES_TO_REQUEST {
 				break;
 			}
 			let mut index: BlockNumber = 0;
@@ -654,7 +654,7 @@ impl ChainSync {
 				continue;
 			}
 			let mut block = prev;
-			while block < next && headers.len() <= MAX_HEADERS_TO_REQUEST {
+			while block < next && headers.len() < MAX_HEADERS_TO_REQUEST {
 				if !self.downloading_headers.contains(&(block as BlockNumber)) {
 					headers.push(block as BlockNumber);
 					self.downloading_headers.insert(block as BlockNumber);
@@ -1045,7 +1045,7 @@
 	fn check_resume(&mut self, io: &mut SyncIo) {
 		if !io.chain().queue_info().is_full() && self.state == SyncState::Waiting {
-			self.state = SyncState::Idle;
+			self.state = SyncState::Blocks;
 			self.continue_sync(io);
 		}
 	}
diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs
index c70bac4ca..a07f85a7f 100644
--- a/sync/src/range_collection.rs
+++ b/sync/src/range_collection.rs
@@ -40,7 +40,7 @@ pub trait RangeCollection<K, V> {
 	fn get_tail(&mut self, key: &K) -> Range<K>;
 	/// Remove all elements < `start` in the range that contains `start` - 1
 	fn remove_head(&mut self, start: &K);
-	/// Remove all elements >= `start` in the range that contains `start`
+	/// Remove all elements >= `start` in the range that contains `start`
 	fn remove_tail(&mut self, start: &K);
 	/// Remove all elements >= `tail`
 	fn insert_item(&mut self, key: K, value: V);
@@ -168,6 +168,7 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq +
 	fn insert_item(&mut self, key: K, value: V) {
 		assert!(!self.have_item(&key));
+		// todo: fix warning
 		let lower = match self.binary_search_by(|&(k, _)| k.cmp(&key).reverse()) {
 			Ok(index) => index,
 			Err(index) => index,
diff --git a/util/Cargo.toml b/util/Cargo.toml
index 733b08701..b1e9bbc1e 100644
--- a/util/Cargo.toml
+++ b/util/Cargo.toml
@@ -26,7 +26,7 @@ crossbeam = "0.2"
 slab = { git = "https://github.com/arkpar/slab.git" }
 sha3 = { path = "sha3" }
 serde = "0.6.7"
-clippy = "0.0.37"
+clippy = "0.0.41"
 json-tests = { path = "json-tests" }
 target_info = "0.1.0"
 igd = "0.4.2"
diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs
index d9d7b29cf..7b810639b 100644
--- a/util/src/journaldb.rs
+++ b/util/src/journaldb.rs
@@ -47,10 +47,10 @@ impl Clone for JournalDB {
 	}
 }
 
-const LAST_ERA_KEY : [u8; 4] = [ b'l', b'a', b's', b't' ];
+const LATEST_ERA_KEY : [u8; 4] = [ b'l', b'a', b's', b't' ];
 const VERSION_KEY : [u8; 4] = [ b'j', b'v', b'e', b'r' ];
 
-const DB_VERSION: u32 = 1;
+const DB_VERSION: u32 = 2;
 
 impl JournalDB {
 	/// Create a new instance given a `backing` database.
@@ -87,7 +87,7 @@ impl JournalDB {
 
 	/// Check if this database has any commits
 	pub fn is_empty(&self) -> bool {
-		self.backing.get(&LAST_ERA_KEY).expect("Low level database error").is_none()
+		self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
 	}
 
 	/// Commit all recent insert operations and historical removals from the old era
@@ -144,6 +144,7 @@ impl JournalDB {
 			r.append(&inserts);
 			r.append(&removes);
 			try!(batch.put(&last, r.as_raw()));
+			try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
 		}
 
 		// apply old commits' details
@@ -181,7 +182,6 @@ impl JournalDB {
 				try!(batch.delete(&h));
 				deletes += 1;
 			}
-			try!(batch.put(&LAST_ERA_KEY, &encode(&end_era)));
 			trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, deletes);
 		}
@@ -228,8 +228,8 @@ impl JournalDB {
 
 	fn read_counters(db: &DB) -> HashMap<H256, i32> {
 		let mut res = HashMap::new();
-		if let Some(val) = db.get(&LAST_ERA_KEY).expect("Low-level database error.") {
-			let mut era = decode::<u64>(&val) + 1;
+		if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
+			let mut era = decode::<u64>(&val);
 			loop {
 				let mut index = 0usize;
 				while let Some(rlp_data) = db.get({
@@ -245,10 +245,10 @@ impl JournalDB {
 					}
 					index += 1;
 				};
-				if index == 0 {
+				if index == 0 || era == 0 {
 					break;
 				}
-				era += 1;
+				era -= 1;
 			}
 		}
 		trace!("Recovered {} counters", res.len());
@@ -426,4 +426,32 @@ mod tests {
 		jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
 		assert!(jdb.exists(&foo));
 	}
+
+	#[test]
+	fn reopen() {
+		use rocksdb::DB;
+		let mut dir = ::std::env::temp_dir();
+		dir.push(H32::random().hex());
+
+		let foo = {
+			let mut jdb = JournalDB::new(DB::open_default(dir.to_str().unwrap()).unwrap());
+			// history is 1
+			let foo = jdb.insert(b"foo");
+			jdb.commit(0, &b"0".sha3(), None).unwrap();
+			foo
+		};
+
+		{
+			let mut jdb = JournalDB::new(DB::open_default(dir.to_str().unwrap()).unwrap());
+			jdb.remove(&foo);
+			jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
+		}
+
+		{
+			let mut jdb = JournalDB::new(DB::open_default(dir.to_str().unwrap()).unwrap());
+			assert!(jdb.exists(&foo));
+			jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
+			assert!(!jdb.exists(&foo));
+		}
+	}
 }
diff --git a/util/src/network/host.rs b/util/src/network/host.rs
index fb1e8e1df..c1423dbb3 100644
--- a/util/src/network/host.rs
+++ b/util/src/network/host.rs
@@ -412,7 +412,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		let mut to_kill = Vec::new();
 		for e in self.connections.write().unwrap().iter_mut() {
 			if let ConnectionEntry::Session(ref mut s) = *e.lock().unwrap().deref_mut() {
-				if !s.keep_alive() {
+				if !s.keep_alive(io) {
 					s.disconnect(DisconnectReason::PingTimeout);
 					to_kill.push(s.token());
 				}
diff --git a/util/src/network/session.rs b/util/src/network/session.rs
index 8e9a3a9ff..b38807c49 100644
--- a/util/src/network/session.rs
+++ b/util/src/network/session.rs
@@ -180,7 +180,7 @@ impl Session {
 	}
 
 	/// Keep this session alive. Returns false if ping timeout happened
-	pub fn keep_alive(&mut self) -> bool {
+	pub fn keep_alive<Message>(&mut self, io: &IoContext<Message>) -> bool where Message: Send + Sync + Clone {
 		let timed_out = if let Some(pong) = self.pong_time_ns {
 			pong - self.ping_time_ns > PING_TIMEOUT_SEC * 1000_000_000
 		} else {
@@ -191,6 +191,7 @@ impl Session {
 			if let Err(e) = self.send_ping() {
 				debug!("Error sending ping message: {:?}", e);
 			}
+			io.update_registration(self.token()).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
 		}
 		!timed_out
 	}