diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs
index 123847a7f..874fc9646 100644
--- a/ethcore/src/client.rs
+++ b/ethcore/src/client.rs
@@ -87,6 +87,8 @@ pub struct ClientConfig {
 	pub blockchain: BlockChainConfig,
 	/// Prefer journal rather than archive.
 	pub prefer_journal: bool,
+	/// The name of the client instance.
+	pub name: String,
 }
 
 impl Default for ClientConfig {
@@ -95,6 +97,7 @@ impl Default for ClientConfig {
 			queue: Default::default(),
 			blockchain: Default::default(),
 			prefer_journal: false,
+			name: Default::default(),
 		}
 	}
 }
@@ -193,6 +196,8 @@ pub struct ClientReport {
 	pub transactions_applied: usize,
 	/// How much gas has been processed so far.
 	pub gas_processed: U256,
+	/// Memory used by the state DB.
+	pub state_db_mem: usize,
 }
 
 impl ClientReport {
@@ -225,7 +230,7 @@ pub struct Client<V = CanonVerifier> where V: Verifier {
 }
 
 const HISTORY: u64 = 1000;
-const CLIENT_DB_VER_STR: &'static str = "4.0";
+const CLIENT_DB_VER_STR: &'static str = "5.1";
 
 impl Client<CanonVerifier> {
 	/// Create a new client with given spec and DB path.
@@ -439,7 +444,9 @@ impl<V> Client<V> where V: Verifier {
 
 	/// Get the report.
 	pub fn report(&self) -> ClientReport {
-		self.report.read().unwrap().clone()
+		let mut report = self.report.read().unwrap().clone();
+		report.state_db_mem = self.state_db.lock().unwrap().mem_used();
+		report
 	}
 
 	/// Tick the client.
diff --git a/parity/main.rs b/parity/main.rs
index 3f4243a0a..43b0504f1 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -53,6 +53,16 @@ use docopt::Docopt;
 use daemonize::Daemonize;
 use number_prefix::{binary_prefix, Standalone, Prefixed};
 
+fn die_with_message(msg: &str) -> ! {
+	println!("ERROR: {}", msg);
+	exit(1);
+}
+
+#[macro_export]
+macro_rules! die {
+	($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*))));
+}
+
 const USAGE: &'static str = r#"
 Parity. Ethereum Client.
 By Wood/Paronyan/Kotewicz/Drwięga/Volf.
@@ -62,13 +72,16 @@ Usage:
   parity daemon <pid-file> [options] [ --no-bootstrap | <enode>... ]
   parity [options] [ --no-bootstrap | <enode>... ]
 
-Options:
+Protocol Options:
   --chain CHAIN            Specify the blockchain type. CHAIN may be either a JSON chain specification file
-                           or frontier, mainnet, morden, or testnet [default: frontier].
+                           or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead].
+  --testnet                Equivalent to --chain testnet (geth-compatible).
+  --networkid INDEX        Override the network identifier from the chain we are on.
   --archive                Client should not prune the state/storage trie.
-  -d --db-path PATH        Specify the database & configuration directory path [default: $HOME/.parity]
-  --keys-path PATH         Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
+  -d --datadir PATH        Specify the database & configuration directory path [default: $HOME/.parity]
+  --identity NAME          Specify your node's name.
 
+Networking Options:
   --no-bootstrap           Don't bother trying to connect to any nodes initially.
   --listen-address URL     Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304].
   --public-address URL     Specify the IP/port on which peers may connect.
@@ -78,18 +91,32 @@ Options:
   --no-upnp                Disable trying to figure out the correct public address over UPnP.
   --node-key KEY           Specify node secret key, either as 64-character hex string or input to SHA3 operation.
 
+API and Console Options:
+  -j --jsonrpc             Enable the JSON-RPC API server.
+  --jsonrpc-addr HOST      Specify the hostname portion of the JSONRPC API server [default: 127.0.0.1].
+  --jsonrpc-port PORT      Specify the port portion of the JSONRPC API server [default: 8545].
+  --jsonrpc-cors URL       Specify CORS header for JSON-RPC API responses [default: null].
+  --jsonrpc-apis APIS      Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited
+                           list of API names. Possible names are web3, eth and net. [default: web3,eth,net].
+  --rpc                    Equivalent to --jsonrpc (geth-compatible).
+  --rpcaddr HOST           Equivalent to --jsonrpc-addr HOST (geth-compatible).
+  --rpcport PORT           Equivalent to --jsonrpc-port PORT (geth-compatible).
+  --rpcapi APIS            Equivalent to --jsonrpc-apis APIS (geth-compatible).
+  --rpccorsdomain URL      Equivalent to --jsonrpc-cors URL (geth-compatible).
+
+Sealing/Mining Options:
+  --author ADDRESS         Specify the block author (aka "coinbase") address for sending block rewards
+                           from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
+  --extradata STRING       Specify a custom extra-data for authored blocks, no more than 32 characters.
+
+Memory Footprint Options:
   --cache-pref-size BYTES  Specify the preferred size of the blockchain cache in bytes [default: 16384].
   --cache-max-size BYTES   Specify the maximum size of the blockchain cache in bytes [default: 262144].
   --queue-max-size BYTES   Specify the maximum size of memory to use for block queue [default: 52428800].
+  --cache MEGABYTES        Set total amount of cache to use for the entire system, mutually exclusive with
+                           other cache options (geth-compatible).
 
- -j --jsonrpc              Enable the JSON-RPC API sever.
- --jsonrpc-url URL         Specify URL for JSON-RPC API server [default: 127.0.0.1:8545].
- --jsonrpc-cors URL        Specify CORS header for JSON-RPC API responses [default: null].
-
- --author ADDRESS          Specify the block author (aka "coinbase") address for sending block rewards
-                           from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
- --extra-data STRING       Specify a custom extra-data for authored blocks, no more than 32 characters.
-
+Miscellaneous Options:
   -l --logging LOGGING     Specify the logging level.
   -v --version             Show information about version.
   -h --help                Show this screen.
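For reviewers: the geth-compatible `--cache` flag above subsumes the two byte-granular cache flags. A minimal sketch of the resolution logic this diff applies later in `Configuration::execute` — the names `CacheConfig` and `resolve_cache` are hypothetical, for illustration only:

```rust
/// Hypothetical stand-in for the two blockchain cache settings in ClientConfig.
struct CacheConfig {
    pref_cache_size: usize,
    max_cache_size: usize,
}

/// Resolve the effective cache sizes: a single --cache value (in MB) wins
/// over the byte-granular --cache-pref-size / --cache-max-size flags.
fn resolve_cache(cache_mb: Option<usize>, pref_bytes: usize, max_bytes: usize) -> CacheConfig {
    match cache_mb {
        Some(mb) => {
            let max = mb * 1024 * 1024;
            CacheConfig { pref_cache_size: max / 2, max_cache_size: max }
        }
        None => CacheConfig { pref_cache_size: pref_bytes, max_cache_size: max_bytes },
    }
}

fn main() {
    // --cache 64 overrides both byte-granular defaults.
    let c = resolve_cache(Some(64), 16384, 262144);
    assert_eq!(c.max_cache_size, 64 * 1024 * 1024);
    assert_eq!(c.pref_cache_size, 32 * 1024 * 1024);
}
```

The preferred size defaulting to half the maximum mirrors the `match self.args.flag_cache` block further down in this patch.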
@@ -101,14 +128,18 @@ struct Args {
 	arg_pid_file: String,
 	arg_enode: Vec<String>,
 	flag_chain: String,
+	flag_testnet: bool,
 	flag_db_path: String,
+	flag_networkid: Option<String>,
+	flag_identity: String,
+	flag_cache: Option<usize>,
 	flag_keys_path: String,
 	flag_archive: bool,
 	flag_no_bootstrap: bool,
 	flag_listen_address: String,
 	flag_public_address: Option<String>,
 	flag_address: Option<String>,
-	flag_peers: u32,
+	flag_peers: usize,
 	flag_no_discovery: bool,
 	flag_no_upnp: bool,
 	flag_node_key: Option<String>,
@@ -116,8 +147,15 @@ struct Args {
 	flag_cache_max_size: usize,
 	flag_queue_max_size: usize,
 	flag_jsonrpc: bool,
-	flag_jsonrpc_url: String,
+	flag_jsonrpc_addr: String,
+	flag_jsonrpc_port: u16,
 	flag_jsonrpc_cors: String,
+	flag_jsonrpc_apis: String,
+	flag_rpc: bool,
+	flag_rpcaddr: Option<String>,
+	flag_rpcport: Option<u16>,
+	flag_rpccorsdomain: Option<String>,
+	flag_rpcapi: Option<String>,
 	flag_logging: Option<String>,
 	flag_version: bool,
 	flag_author: String,
@@ -151,14 +189,23 @@ fn setup_log(init: &Option<String>) {
 }
 
 #[cfg(feature = "rpc")]
-fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_domain: &str) {
+fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_domain: &str, apis: Vec<&str>) {
 	use rpc::v1::*;
 
 	let mut server = rpc::HttpServer::new(1);
-	server.add_delegate(Web3Client::new().to_delegate());
-	server.add_delegate(EthClient::new(&client, &sync).to_delegate());
-	server.add_delegate(EthFilterClient::new(&client).to_delegate());
-	server.add_delegate(NetClient::new(&sync).to_delegate());
+	for api in apis.into_iter() {
+		match api {
+			"web3" => server.add_delegate(Web3Client::new().to_delegate()),
+			"net" => server.add_delegate(NetClient::new(&sync).to_delegate()),
+			"eth" => {
+				server.add_delegate(EthClient::new(&client, &sync).to_delegate());
+				server.add_delegate(EthFilterClient::new(&client).to_delegate());
+			}
+			_ => {
+				die!("{}: Invalid API name to be enabled.", api);
+			}
+		}
+	}
 	server.start_async(url, cors_domain);
 }
 
@@ -179,16 +226,6 @@ By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
 ", version());
 }
 
-fn die_with_message(msg: &str) -> ! {
-	println!("ERROR: {}", msg);
-	exit(1);
-}
-
-#[macro_export]
-macro_rules! die {
-	($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*))));
-}
-
 struct Configuration {
 	args: Args
 }
@@ -221,8 +258,11 @@ impl Configuration {
 	}
 
 	fn spec(&self) -> Spec {
+		if self.args.flag_testnet {
+			return ethereum::new_morden();
+		}
 		match self.args.flag_chain.as_ref() {
-			"frontier" | "mainnet" => ethereum::new_frontier(),
+			"frontier" | "homestead" | "mainnet" => ethereum::new_frontier(),
 			"morden" | "testnet" => ethereum::new_morden(),
 			"olympic" => ethereum::new_olympic(),
 			f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| die!("{}: Couldn't read chain specification file. Sure it exists?", f)).as_ref()),
@@ -276,7 +316,7 @@ impl Configuration {
 		ret.public_address = public;
 		ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3()));
 		ret.discovery_enabled = !self.args.flag_no_discovery;
-		ret.ideal_peers = self.args.flag_peers;
+		ret.ideal_peers = self.args.flag_peers as u32;
 		let mut net_path = PathBuf::from(&self.path());
 		net_path.push("network");
 		ret.config_path = Some(net_path.to_str().unwrap().to_owned());
@@ -307,13 +347,22 @@ impl Configuration {
 		let spec = self.spec();
 		let net_settings = self.net_settings(&spec);
 		let mut sync_config = SyncConfig::default();
-		sync_config.network_id = spec.network_id();
+		sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id))).unwrap_or(spec.network_id());
 
 		// Build client
 		let mut client_config = ClientConfig::default();
-		client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size;
-		client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
+		match self.args.flag_cache {
+			Some(mb) => {
+				client_config.blockchain.max_cache_size = mb * 1024 * 1024;
+				client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2;
+			}
+			None => {
+				client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size;
+				client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
+			}
+		}
 		client_config.prefer_journal = !self.args.flag_archive;
+		client_config.name = self.args.flag_identity.clone();
 		client_config.queue.max_mem_use = self.args.flag_queue_max_size;
 		let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
 		let client = service.client().clone();
@@ -324,9 +373,16 @@ impl Configuration {
 		let sync = EthSync::register(service.network(), sync_config, client);
 
 		// Setup rpc
-		if self.args.flag_jsonrpc {
-			setup_rpc_server(service.client(), sync.clone(), &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors);
-			SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url));
+		if self.args.flag_jsonrpc || self.args.flag_rpc {
+			let url = format!("{}:{}",
+				self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr),
+				self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port)
+			);
+			SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url));
+			let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
+			// TODO: use this as the API list.
+			let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
+			setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect());
 		}
 
 		// Register IO handler
@@ -395,7 +451,7 @@ impl Informant {
 		let sync_info = sync.status();
 
 		if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) {
-			println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} chain, {} queue, {} sync ]",
+			println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]",
 				chain_info.best_block_number,
 				chain_info.best_block_hash,
 				(report.blocks_imported - last_report.blocks_imported) / dur,
@@ -408,6 +464,7 @@ impl Informant {
 				queue_info.unverified_queue_size,
 				queue_info.verified_queue_size,
+				Informant::format_bytes(report.state_db_mem),
 				Informant::format_bytes(cache_info.total()),
 				Informant::format_bytes(queue_info.mem_used),
 				Informant::format_bytes(sync_info.mem_used),
diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs
index 0bc843d57..aadc264ad 100644
--- a/rpc/src/v1/impls/eth.rs
+++ b/rpc/src/v1/impls/eth.rs
@@ -323,7 +323,8 @@ impl EthFilter for EthFilterClient {
 			None => Ok(Value::Array(vec![] as Vec<Value>)),
 			Some(info) => match info.filter {
 				PollFilter::Block => {
-					let current_number = client.chain_info().best_block_number;
+					// + 1, because we want to return hashes including the current block hash.
+					let current_number = client.chain_info().best_block_number + 1;
 					let hashes = (info.block_number..current_number).into_iter()
 						.map(BlockId::Number)
 						.filter_map(|id| client.block_hash(id))
diff --git a/sync/src/chain.rs b/sync/src/chain.rs
index a2d695e34..ed78014da 100644
--- a/sync/src/chain.rs
+++ b/sync/src/chain.rs
@@ -209,7 +209,7 @@ pub struct ChainSync {
 	/// True if common block for our and remote chain has been found
 	have_common_block: bool,
 	/// Last propagated block number
-	last_send_block_number: BlockNumber,
+	last_sent_block_number: BlockNumber,
 	/// Max blocks to download ahead
 	max_download_ahead_blocks: usize,
 	/// Network ID
@@ -238,7 +238,7 @@ impl ChainSync {
 			last_imported_hash: None,
 			syncing_difficulty: U256::from(0u64),
 			have_common_block: false,
-			last_send_block_number: 0,
+			last_sent_block_number: 0,
 			max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks),
 			network_id: config.network_id,
 			transaction_queue: Mutex::new(TransactionQueue::new()),
@@ -910,9 +910,8 @@ impl ChainSync {
 		}
 		match sync.send(peer_id, packet_id, packet) {
 			Err(e) => {
-				warn!(target:"sync", "Error sending request: {:?}", e);
+				debug!(target:"sync", "Error sending request: {:?}", e);
 				sync.disable_peer(peer_id);
-				self.on_peer_aborting(sync, peer_id);
 			}
 			Ok(_) => {
 				let mut peer = self.peers.get_mut(&peer_id).unwrap();
@@ -925,9 +924,8 @@ impl ChainSync {
 	/// Generic packet sender
 	fn send_packet(&mut self, sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) {
 		if let Err(e) = sync.send(peer_id, packet_id, packet) {
-			warn!(target:"sync", "Error sending packet: {:?}", e);
+			debug!(target:"sync", "Error sending packet: {:?}", e);
 			sync.disable_peer(peer_id);
-			self.on_peer_aborting(sync, peer_id);
 		}
 	}
 	/// Called when peer sends us new transactions
@@ -1251,26 +1249,25 @@ impl ChainSync {
 		sent
 	}
 
+	fn propagate_latest_blocks(&mut self, io: &mut SyncIo) {
+		let chain_info = io.chain().chain_info();
+		if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
+			let blocks = self.propagate_blocks(&chain_info, io);
+			let hashes = self.propagate_new_hashes(&chain_info, io);
+			if blocks != 0 || hashes != 0 {
+				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
+			}
+		}
+		self.last_sent_block_number = chain_info.best_block_number;
+	}
+
 	/// Maintain other peers. Send out any new blocks and transactions
 	pub fn maintain_sync(&mut self, io: &mut SyncIo) {
 		self.check_resume(io);
 	}
 
-	/// should be called once chain has new block, triggers the latest block propagation
-	pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) {
-		let chain = io.chain().chain_info();
-		if (((chain.best_block_number as i64) - (self.last_send_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
-			let blocks = self.propagate_blocks(&chain, io);
-			let hashes = self.propagate_new_hashes(&chain, io);
-			if blocks != 0 || hashes != 0 {
-				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
-			}
-		}
-		self.last_send_block_number = chain.best_block_number;
-	}
-
-	/// called when block is imported to chain, updates transactions queue
-	pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) {
+	/// Called when a block is imported to the chain: updates the transaction queue and propagates the blocks.
+	pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) {
 		fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
 			let block = chain
 				.block(BlockId::Hash(hash.clone()))
@@ -1281,24 +1278,31 @@ impl ChainSync {
 		}
 
-		let chain = io.chain();
-		let good = good.par_iter().map(|h| fetch_transactions(chain, h));
-		let bad = bad.par_iter().map(|h| fetch_transactions(chain, h));
+		{
+			let chain = io.chain();
+			let good = good.par_iter().map(|h| fetch_transactions(chain, h));
+			let bad = bad.par_iter().map(|h| fetch_transactions(chain, h));
 
-		good.for_each(|txs| {
-			let mut transaction_queue = self.transaction_queue.lock().unwrap();
-			let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
-			transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
-		});
-		bad.for_each(|txs| {
-			// populate sender
-			for tx in &txs {
-				let _sender = tx.sender();
-			}
-			let mut transaction_queue = self.transaction_queue.lock().unwrap();
-			transaction_queue.add_all(txs, |a| chain.nonce(a));
-		});
+			good.for_each(|txs| {
+				let mut transaction_queue = self.transaction_queue.lock().unwrap();
+				let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
+				transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
+			});
+			bad.for_each(|txs| {
+				// populate sender
+				for tx in &txs {
+					let _sender = tx.sender();
+				}
+				let mut transaction_queue = self.transaction_queue.lock().unwrap();
+				transaction_queue.add_all(txs, |a| chain.nonce(a));
+			});
+		}
+
+		// Propagate latest blocks
+		self.propagate_latest_blocks(io);
+		// TODO [todr] propagate transactions?
 	}
+
 }
 
 #[cfg(test)]
@@ -1637,13 +1641,13 @@ mod tests {
 		let retracted_blocks = vec![client.block_hash_delta_minus(1)];
 
 		let mut queue = VecDeque::new();
-		let io = TestIo::new(&mut client, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &mut queue, None);
 
 		// when
-		sync.chain_new_blocks(&io, &[], &good_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]);
 		assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0);
 		assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1);
-		sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]);
 
 		// then
 		let status = sync.transaction_queue.lock().unwrap().status();
diff --git a/sync/src/lib.rs b/sync/src/lib.rs
index 8a30385a2..b5869642c 100644
--- a/sync/src/lib.rs
+++ b/sync/src/lib.rs
@@ -154,13 +154,11 @@ impl NetworkProtocolHandler for EthSync {
 
 	fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
 		match *message {
-			SyncMessage::BlockVerified => {
-				self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref()));
-			},
 			SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => {
-				let sync_io = NetSyncIo::new(io, self.chain.deref());
-				self.sync.write().unwrap().chain_new_blocks(&sync_io, good, bad, retracted);
-			}
+				let mut sync_io = NetSyncIo::new(io, self.chain.deref());
+				self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted);
+			},
+			_ => {/* Ignore other messages */},
 		}
 	}
 }
diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs
index 58f50916e..855aa79a6 100644
--- a/sync/src/tests/chain.rs
+++ b/sync/src/tests/chain.rs
@@ -129,8 +129,8 @@ fn propagate_hashes() {
 	net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
 	net.sync();
 
-	net.trigger_block_verified(0); //first event just sets the marker
-	net.trigger_block_verified(0);
+	net.trigger_chain_new_blocks(0); //first event just sets the marker
+	net.trigger_chain_new_blocks(0);
 
 	// 5 peers to sync
 	assert_eq!(5, net.peer(0).queue.len());
@@ -154,8 +154,8 @@ fn propagate_blocks() {
 	net.sync();
 
 	net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
-	net.trigger_block_verified(0); //first event just sets the marker
-	net.trigger_block_verified(0);
+	net.trigger_chain_new_blocks(0); //first event just sets the marker
+	net.trigger_chain_new_blocks(0);
 
 	assert!(!net.peer(0).queue.is_empty());
 	// NEW_BLOCK_PACKET
diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs
index 5b53ad90b..d01dba0b2 100644
--- a/sync/src/tests/helpers.rs
+++ b/sync/src/tests/helpers.rs
@@ -455,8 +455,8 @@ impl TestNet {
 		self.peers.iter().all(|p| p.queue.is_empty())
 	}
 
-	pub fn trigger_block_verified(&mut self, peer_id: usize) {
+	pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
 		let mut peer = self.peer_mut(peer_id);
-		peer.sync.chain_blocks_verified(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None));
+		peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]);
 	}
 }
diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs
index 01e53f819..d0d7c05ff 100644
--- a/util/src/journaldb.rs
+++ b/util/src/journaldb.rs
@@ -25,10 +25,7 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
 use std::env;
 
 /// Implementation of the HashDB trait for a disk-backed database with a memory overlay
-/// and, possibly, latent-removal semantics.
-///
-/// If `counters` is `None`, then it behaves exactly like OverlayDB. If not it behaves
-/// differently:
+/// and latent-removal semantics.
 ///
 /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
 /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
@@ -60,7 +57,6 @@ const DB_VERSION_NO_JOURNAL : u32 = 3 + 256;
 const PADDING : [u8; 10] = [ 0u8; 10 ];
 
 impl JournalDB {
-
 	/// Create a new instance from file
 	pub fn new(path: &str) -> JournalDB {
 		Self::from_prefs(path, true)
@@ -149,6 +145,87 @@ impl JournalDB {
 		Ok(ret as u32)
 	}
 
+	fn morph_key(key: &H256, index: u8) -> Bytes {
+		let mut ret = key.bytes().to_owned();
+		ret.push(index);
+		ret
+	}
+
+	// The next three are valid only as long as there is an insert operation of `key` in the journal.
+	fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
+	fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
+	fn is_already_in(backing: &Database, key: &H256) -> bool {
+		backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
+	}
+
+	fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
+		for &(ref h, ref d) in inserts {
+			if let Some(c) = counters.get_mut(h) {
+				// already counting. increment.
+				*c += 1;
+				continue;
+			}
+
+			// this is the first entry for this node in the journal.
+			if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() {
+				// already in the backing DB. start counting, and remember it was already in.
+				Self::set_already_in(batch, &h);
+				counters.insert(h.clone(), 1);
+				continue;
+			}
+
+			// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
+			//Self::reset_already_in(&h);
+			assert!(!Self::is_already_in(backing, &h));
+			batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?");
+		}
+	}
+
+	fn replay_keys(inserts: &Vec<H256>, backing: &Database, counters: &mut HashMap<H256, i32>) {
+		trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters);
+		for h in inserts {
+			if let Some(c) = counters.get_mut(h) {
+				// already counting. increment.
+				*c += 1;
+				continue;
+			}
+
+			// this is the first entry for this node in the journal.
+			// it is initialised to 1 if it was already in.
+			if Self::is_already_in(backing, h) {
+				trace!("replay_keys: Key {} was already in!", h);
+				counters.insert(h.clone(), 1);
+			}
+		}
+		trace!("replay_keys: (end) counters={:?}", counters);
+	}
+
+	fn kill_keys(deletes: Vec<H256>, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
+		for h in deletes.into_iter() {
+			let mut n: Option<i32> = None;
+			if let Some(c) = counters.get_mut(&h) {
+				if *c > 1 {
+					*c -= 1;
+					continue;
+				} else {
+					n = Some(*c);
+				}
+			}
+			match &n {
+				&Some(i) if i == 1 => {
+					counters.remove(&h);
+					Self::reset_already_in(batch, &h);
+				}
+				&None => {
+					// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
+					//assert!(!Self::is_already_in(db, &h));
+					batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?");
+				}
+				_ => panic!("Invalid value in counters: {:?}", n),
+			}
+		}
+	}
+
 	/// Commit all recent insert operations and historical removals from the old era
 	/// to the backing database.
 	fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
@@ -159,60 +236,85 @@ impl JournalDB {
 		// TODO: store reclaim_period.
 
-		// when we make a new commit, we journal the inserts and removes.
-		// for each end_era that we journaled that we are no passing by,
-		// we remove all of its removes assuming it is canonical and all
-		// of its inserts otherwise.
+		// When we make a new commit, we make a journal of all blocks in the recent history and record
+		// all keys that were inserted and deleted. The journal is ordered by era; multiple commits can
+		// share the same era. This forms a data structure similar to a queue but whose items are tuples.
+		// By the time it comes to remove a tuple from the queue (i.e. when the era passes from recent
+		// history into ancient history) only one commit from the tuple is considered canonical. This commit
+		// is kept in the main backing database, whereas any others from the same era are reverted.
+		//
+		// It is possible that a key, properly available in the backing database, be deleted and re-inserted
+		// in the recent history queue, yet have both operations in commits that are eventually non-canonical.
+		// To prevent the original, and still required, key from being deleted, we maintain a reference count
+		// which includes an original key, if any.
+		//
+		// The semantics of the `counter` are:
+		// insert key k:
+		//   counter already contains k: count += 1
+		//   counter doesn't contain k:
+		//     backing db contains k: count = 1
+		//     backing db doesn't contain k: insert into backing db, count = 0
+		// delete key k:
+		//   counter contains k (count is asserted to be non-zero):
+		//     count > 1: counter -= 1
+		//     count == 1: remove counter
+		//     count == 0: remove key from backing db
+		//   counter doesn't contain k: remove key from backing db
 		//
-		// We also keep reference counters for each key inserted in the journal to handle
-		// the following cases where key K must not be deleted from the DB when processing removals :
-		// Given H is the journal size in eras, 0 <= C <= H.
-		// Key K is removed in era A(N) and re-inserted in canonical era B(N + C).
-		// Key K is removed in era A(N) and re-inserted in non-canonical era B`(N + C).
-		// Key K is added in non-canonical era A'(N) canonical B(N + C).
+		// Practically, this means that for each commit block turning from recent to ancient we do the
+		// following:
+		// is_canonical:
+		//   inserts: Ignored (left alone in the backing database).
+		//   deletes: Enacted; however, recent history queue is checked for ongoing references. This is
+		//            reduced as a preference to deletion from the backing database.
+		// !is_canonical:
+		//   inserts: Reverted; however, recent history queue is checked for ongoing references. This is
+		//            reduced as a preference to deletion from the backing database.
+		//   deletes: Ignored (they were never inserted).
 		//
-		// The counter is encreased each time a key is inserted in the journal in the commit. The list of insertions
-		// is saved with the era record. When the era becomes end_era and goes out of journal the counter is decreased
-		// and the key is safe to delete.
 
 		// record new commit's details.
-		trace!("commit: #{} ({}), end era: {:?}", now, id, end);
+		trace!("commit: #{} ({}), end era: {:?}", now, id, end);
 		let mut counters = self.counters.as_ref().unwrap().write().unwrap();
 		let batch = DBTransaction::new();
 		{
 			let mut index = 0usize;
 			let mut last;
 
-			while {
-				let record = try!(self.backing.get({
-					let mut r = RlpStream::new_list(3);
-					r.append(&now);
-					r.append(&index);
-					r.append(&&PADDING[..]);
-					last = r.drain();
-					&last
-				}));
-				match record {
-					Some(r) => {
-						assert!(&Rlp::new(&r).val_at::<H256>(0) != id);
-						true
-					},
-					None => false,
-				}
-			} {
+			while try!(self.backing.get({
+				let mut r = RlpStream::new_list(3);
+				r.append(&now);
+				r.append(&index);
+				r.append(&&PADDING[..]);
+				last = r.drain();
+				&last
+			})).is_some() {
 				index += 1;
 			}
 
+			let drained = self.overlay.drain();
+			let removes: Vec<H256> = drained
+				.iter()
+				.filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned()
+				.collect();
+			let inserts: Vec<(H256, Bytes)> = drained
+				.into_iter()
+				.filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None })
+				.collect();
+
 			let mut r = RlpStream::new_list(3);
-			let inserts: Vec<H256> = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect();
-			// Increase counter for each inserted key no matter if the block is canonical or not.
-			for i in &inserts {
-				*counters.entry(i.clone()).or_insert(0) += 1;
-			}
-			let removes: Vec<H256> = self.overlay.keys().iter().filter(|&(_, &c)| c < 0).map(|(key, _)| key.clone()).collect();
 			r.append(id);
-			r.append(&inserts);
+
+			// Process the new inserts.
+			// We use the inserts for three things. For each:
+			// - we place into the backing DB or increment the counter if already in;
+			// - we note in the backing db that it was already in;
+			// - we write the key into our journal for this block;
+
+			r.begin_list(inserts.len());
+			inserts.iter().foreach(|&(k, _)| {r.append(&k);});
 			r.append(&removes);
+			Self::insert_keys(&inserts, &self.backing, &mut counters, &batch);
 			try!(batch.put(&last, r.as_raw()));
 			try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
 		}
@@ -221,8 +323,6 @@ impl JournalDB {
 		if let Some((end_era, canon_id)) = end {
 			let mut index = 0usize;
 			let mut last;
-			let mut to_remove: Vec<H256> = Vec::new();
-			let mut canon_inserts: Vec<H256> = Vec::new();
 			while let Some(rlp_data) = try!(self.backing.get({
 				let mut r = RlpStream::new_list(3);
 				r.append(&end_era);
@@ -232,54 +332,19 @@ impl JournalDB {
 				&last
 			})) {
 				let rlp = Rlp::new(&rlp_data);
-				let mut inserts: Vec<H256> = rlp.val_at(1);
-				JournalDB::decrease_counters(&inserts, &mut counters);
+				let inserts: Vec<H256> = rlp.val_at(1);
+				let deletes: Vec<H256> = rlp.val_at(2);
 				// Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical
-				if canon_id == rlp.val_at(0) {
-					let mut canon_deletes: Vec<H256> = rlp.val_at(2);
-					trace!("Purging nodes deleted from canon: {:?}", canon_deletes);
-					to_remove.append(&mut canon_deletes);
-					canon_inserts = inserts;
-				}
-				else {
-					trace!("Purging nodes inserted in non-canon: {:?}", inserts);
-					to_remove.append(&mut inserts);
-				}
-				trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): {} entries", end_era, index, rlp.val_at::<H256>(0), canon_id, to_remove.len());
+				Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch);
 				try!(batch.delete(&last));
 				index += 1;
 			}
-
-			let canon_inserts = canon_inserts.drain(..).collect::<HashSet<_>>();
-			// Purge removed keys if they are not referenced and not re-inserted in the canon commit
-			let mut deletes = 0;
-			trace!("Purging filtered nodes: {:?}", to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)).collect::<Vec<_>>());
-			for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) {
-				try!(batch.delete(&h));
-				deletes += 1;
-			}
-			trace!("Total nodes purged: {}", deletes);
+			trace!("JournalDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id);
 		}
 
-		// Commit overlay insertions
-		let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch);
 		try!(self.backing.write(batch));
-		Ok(ret as u32)
-	}
-
-
-	// Decrease counters for given keys. Deletes obsolete counters
-	fn decrease_counters(keys: &[H256], counters: &mut HashMap<H256, i32>) {
-		for i in keys.iter() {
-			let delete_counter = {
-				let cnt = counters.get_mut(i).expect("Missing key counter");
-				*cnt -= 1;
-				*cnt == 0
-			};
-			if delete_counter {
-				counters.remove(i);
-			}
-		}
+//		trace!("JournalDB::commit() deleted {} nodes", deletes);
+		Ok(0)
 	}
 
 	fn payload(&self, key: &H256) -> Option<Bytes> {
@@ -287,7 +352,7 @@ impl JournalDB {
 	}
 
 	fn read_counters(db: &Database) -> HashMap<H256, i32> {
-		let mut res = HashMap::new();
+		let mut counters = HashMap::new();
 		if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
 			let mut era = decode::<u64>(&val);
 			loop {
				let mut index = 0usize;
 				while let Some(rlp_data) = db.get({
 					let mut r = RlpStream::new_list(3);
 					r.append(&era);
 					r.append(&index);
 					r.append(&&PADDING[..]);
 					&r.drain()
 				}).expect("Low-level database error.") {
+					trace!("read_counters: era={}, index={}", era, index);
 					let rlp = Rlp::new(&rlp_data);
-					let to_add: Vec<H256> = rlp.val_at(1);
-					for h in to_add {
-						*res.entry(h).or_insert(0) += 1;
-					}
+					let inserts: Vec<H256> = rlp.val_at(1);
+					Self::replay_keys(&inserts, db, &mut counters);
 					index += 1;
 				};
 				if index == 0 || era == 0 {
 					break;
 				}
 				era -= 1;
 			}
 		}
-		trace!("Recovered {} counters", res.len());
-		res
+		trace!("Recovered {} counters", counters.len());
+		counters
 	}
-}
+
+	/// Returns heap memory size used
+	pub fn mem_used(&self) -> usize {
+		self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 }
+	}
+}
 
 impl HashDB for JournalDB {
 	fn keys(&self) -> HashMap<H256, i32> {
@@ -368,6 +437,28 @@ mod tests {
 	use super::*;
 	use hashdb::*;
 
+	#[test]
+	fn insert_same_in_fork() {
+		// history is 1
+		let mut jdb = JournalDB::new_temp();
+
+		let x = jdb.insert(b"X");
+		jdb.commit(1, &b"1".sha3(), None).unwrap();
+		jdb.commit(2, &b"2".sha3(), None).unwrap();
+		jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
+		jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
+
+		jdb.remove(&x);
+		jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
+		let x = jdb.insert(b"X");
jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + + assert!(jdb.exists(&x)); + } + #[test] fn long_history() { // history is 3 @@ -488,15 +579,18 @@ mod tests { assert!(jdb.exists(&foo)); } + #[test] fn reopen() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); + let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); foo }; @@ -510,8 +604,68 @@ mod tests { { let mut jdb = JournalDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(!jdb.exists(&foo)); } } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + + // foo is ancient history. + + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + foo + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } } diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 680a6e1d0..9cd018935 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -21,6 +21,7 @@ use bytes::*; use rlp::*; use sha3::*; use hashdb::*; +use heapsize::*; use std::mem; use std::collections::HashMap; @@ -143,6 +144,11 @@ impl MemoryDB { } self.raw(key).unwrap() } + + /// Returns the size of allocated heap memory + pub fn mem_used(&self) -> usize { + self.data.heap_size_of_children() + } } static NULL_RLP_STATIC: [u8; 1] = [0x80; 1]; diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index 55e688c91..fe65be6d1 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -190,25 +190,25 @@ impl Connection { /// Register this connection with the IO event loop. 
pub fn register_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection register; token={:?}", reg); + trace!(target: "network", "connection register; token={:?}", reg); if let Err(e) = event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()) { - debug!("Failed to register {:?}, {:?}", reg, e); + trace!(target: "network", "Failed to register {:?}, {:?}", reg, e); } Ok(()) } /// Update connection registration. Should be called at the end of the IO handler. pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection reregister; token={:?}", reg); + trace!(target: "network", "connection reregister; token={:?}", reg); event_loop.reregister( &self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - debug!("Failed to reregister {:?}, {:?}", reg, e); + trace!(target: "network", "Failed to reregister {:?}, {:?}", reg, e); Ok(()) }) } /// Delete connection registration. Should be called at the end of the IO handler. pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection deregister; token={:?}", self.token); + trace!(target: "network", "connection deregister; token={:?}", self.token); event_loop.deregister(&self.socket).ok(); // ignore errors here Ok(()) } diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index cca133ba9..a72cc28ad 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -222,7 +222,7 @@ impl Handshake { /// Parse, validate and confirm auth message fn read_auth(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received handshake auth from {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received handshake auth from {:?}", self.connection.socket.peer_addr()); if data.len() != V4_AUTH_PACKET_SIZE { debug!(target:"net", "Wrong auth packet size"); return Err(From::from(NetworkError::BadProtocol)); @@ -253,7 +253,7 @@ impl Handshake { } fn read_auth_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); self.auth_cipher.extend_from_slice(data); let auth = try!(ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..])); let rlp = UntrustedRlp::new(&auth); @@ -268,7 +268,7 @@ impl Handshake { /// Parse and validate ack message fn read_ack(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received handshake auth to {:?}", self.connection.socket.peer_addr()); if data.len() != V4_ACK_PACKET_SIZE { debug!(target:"net", "Wrong ack packet size"); return Err(From::from(NetworkError::BadProtocol)); @@ -296,7 +296,7 @@ impl Handshake { } fn read_ack_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); self.ack_cipher.extend_from_slice(data); let ack = try!(ecies::decrypt(secret, &self.ack_cipher[0..2], &self.ack_cipher[2..])); let rlp = 
@@ -309,7 +309,7 @@ impl Handshake {
 
 	/// Sends auth message
 	fn write_auth(&mut self, secret: &Secret, public: &Public) -> Result<(), UtilError> {
-		trace!(target:"net", "Sending handshake auth to {:?}", self.connection.socket.peer_addr());
+		trace!(target:"network", "Sending handshake auth to {:?}", self.connection.socket.peer_addr());
 		let mut data = [0u8; /*Signature::SIZE*/ 65 + /*H256::SIZE*/ 32 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32 + 1]; //TODO: use associated constants
 		let len = data.len();
 		{
@@ -336,7 +336,7 @@ impl Handshake {
 
 	/// Sends ack message
 	fn write_ack(&mut self) -> Result<(), UtilError> {
-		trace!(target:"net", "Sending handshake ack to {:?}", self.connection.socket.peer_addr());
+		trace!(target:"network", "Sending handshake ack to {:?}", self.connection.socket.peer_addr());
 		let mut data = [0u8; 1 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32]; //TODO: use associated constants
 		let len = data.len();
 		{
@@ -355,7 +355,7 @@ impl Handshake {
 
 	/// Sends EIP8 ack message
 	fn write_ack_eip8(&mut self) -> Result<(), UtilError> {
-		trace!(target:"net", "Sending EIP8 handshake ack to {:?}", self.connection.socket.peer_addr());
+		trace!(target:"network", "Sending EIP8 handshake ack to {:?}", self.connection.socket.peer_addr());
 		let mut rlp = RlpStream::new_list(3);
 		rlp.append(self.ecdhe.public());
 		rlp.append(&self.nonce);
diff --git a/util/src/network/host.rs b/util/src/network/host.rs
index f2cc9fe48..ece24a1d1 100644
--- a/util/src/network/host.rs
+++ b/util/src/network/host.rs
@@ -170,29 +170,37 @@ pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static {
 	io: &'s IoContext<NetworkIoMessage<Message>>,
 	protocol: ProtocolId,
 	sessions: Arc<RwLock<Slab<SharedSession>>>,
-	session: Option<StreamToken>,
+	session: Option<SharedSession>,
+	session_id: Option<StreamToken>,
 }
 
 impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, {
 	/// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler.
 	fn new(io: &'s IoContext<NetworkIoMessage<Message>>, protocol: ProtocolId,
-		session: Option<StreamToken>, sessions: Arc<RwLock<Slab<SharedSession>>>) -> NetworkContext<'s, Message> {
+		session: Option<SharedSession>, sessions: Arc<RwLock<Slab<SharedSession>>>) -> NetworkContext<'s, Message> {
+		let id = session.as_ref().map(|s| s.lock().unwrap().token());
 		NetworkContext {
 			io: io,
 			protocol: protocol,
+			session_id: id,
 			session: session,
 			sessions: sessions,
 		}
 	}
 
+	fn resolve_session(&self, peer: PeerId) -> Option<SharedSession> {
+		match self.session_id {
+			Some(id) if id == peer => self.session.clone(),
+			_ => self.sessions.read().unwrap().get(peer).cloned(),
+		}
+	}
+
 	/// Send a packet over the network to another peer.
 	pub fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
-		let session = { self.sessions.read().unwrap().get(peer).cloned() };
+		let session = self.resolve_session(peer);
 		if let Some(session) = session {
-			session.lock().unwrap().deref_mut().send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| {
-				warn!(target: "network", "Send error: {:?}", e);
-			}); //TODO: don't copy vector data
+			try!(session.lock().unwrap().deref_mut().send_packet(self.protocol, packet_id as u8, &data));
 			try!(self.io.update_registration(peer));
 		} else {
 			trace!(target: "network", "Send: Peer no longer exist")
@@ -200,14 +208,10 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone
 		Ok(())
 	}
 
-	/// Respond to a current network message. Panics if no there is no packet in the context.
+	/// Respond to a current network message. Panics if there is no packet in the context. If the session has expired, this does nothing.
 	pub fn respond(&self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
-		match self.session {
-			Some(session) => self.send(session, packet_id, data),
-			None => {
-				panic!("Respond: Session does not exist")
-			}
-		}
+		assert!(self.session.is_some(), "Respond called without network context");
+		self.send(self.session_id.unwrap(), packet_id, data)
 	}
 
 	/// Send an IO message
@@ -215,7 +219,6 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone
 		self.io.message(NetworkIoMessage::User(msg));
 	}
 
-
 	/// Disable current protocol capability for given peer. If no capabilities left peer gets disconnected.
 	pub fn disable_peer(&self, peer: PeerId) {
 		//TODO: remove capability, disconnect if no capabilities left
@@ -239,7 +242,7 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone
 
 	/// Returns peer identification string
 	pub fn peer_info(&self, peer: PeerId) -> String {
-		let session = { self.sessions.read().unwrap().get(peer).cloned() };
+		let session = self.resolve_session(peer);
 		if let Some(session) = session {
 			return session.lock().unwrap().info.client_version.clone()
 		}
@@ -624,7 +627,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		let mut packet_data: Option<(ProtocolId, PacketId, Vec<u8>)> = None;
 		let mut kill = false;
 		let session = { self.sessions.read().unwrap().get(token).cloned() };
-		if let Some(session) = session {
+		if let Some(session) = session.clone() {
 			let mut s = session.lock().unwrap();
 			match s.readable(io, &self.info.read().unwrap()) {
 				Err(e) => {
@@ -656,11 +659,11 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		}
 		for p in ready_data {
 			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
-			h.connected(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token);
+			h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone()), &token);
 		}
 		if let Some((p, packet_id, data)) = packet_data {
 			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
-			h.read(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token, packet_id, &data[1..]);
+			h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone()), &token, packet_id, &data[1..]);
 		}
 		io.update_registration(token).unwrap_or_else(|e| debug!(target: "network", "Token registration error: {:?}", e));
 	}
@@ -718,6 +721,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		let mut to_disconnect: Vec<ProtocolId> = Vec::new();
 		let mut failure_id = None;
 		let mut deregister = false;
+		let mut expired_session = None;
 		match token {
 			FIRST_HANDSHAKE ... LAST_HANDSHAKE => {
 				let handshakes = self.handshakes.write().unwrap();
@@ -733,6 +737,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 			FIRST_SESSION ... LAST_SESSION => {
 				let sessions = self.sessions.write().unwrap();
 				if let Some(session) = sessions.get(token).cloned() {
+					expired_session = Some(session.clone());
 					let mut s = session.lock().unwrap();
 					if !s.expired() {
 						if s.is_ready() {
@@ -757,7 +762,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		}
 		for p in to_disconnect {
 			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
-			h.disconnected(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token);
+			h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone()), &token);
 		}
 		if deregister {
 			io.deregister_stream(token).expect("Error deregistering stream");
diff --git a/util/src/network/session.rs b/util/src/network/session.rs
index edf929a9a..84c063c92 100644
--- a/util/src/network/session.rs
+++ b/util/src/network/session.rs
@@ -213,6 +213,9 @@ impl Session {
 
 	/// Send a protocol packet to peer.
 	pub fn send_packet(&mut self, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), UtilError> {
+		if self.expired() {
+			return Err(From::from(NetworkError::Expired));
+		}
 		let mut i = 0usize;
 		while protocol != self.info.capabilities[i].protocol {
 			i += 1;
@@ -351,15 +354,15 @@ impl Session {
 			offset += caps[i].packet_count;
 			i += 1;
 		}
-		trace!(target: "net", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps);
+		trace!(target: "network", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps);
 		self.info.client_version = client_version;
 		self.info.capabilities = caps;
 		if self.info.capabilities.is_empty() {
-			trace!("No common capabilities with peer.");
+			trace!(target: "network", "No common capabilities with peer.");
 			return Err(From::from(self.disconnect(DisconnectReason::UselessPeer)));
 		}
 		if protocol != host.protocol_version {
-			trace!("Peer protocol version mismatch: {}", protocol);
+			trace!(target: "network", "Peer protocol version mismatch: {}", protocol);
 			return Err(From::from(self.disconnect(DisconnectReason::UselessPeer)));
 		}
 		self.had_hello = true;
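A note for reviewers on the JournalDB change: the reference-counting scheme documented in `commit_with_counters` is the subtle part of this patch. Below is a self-contained sketch of just the counter state machine, independent of the RocksDB batch plumbing. Everything here (`Counters`, `insert`, `delete`, the `u64` keys) is hypothetical illustration, not the JournalDB API; it follows the `insert key k` / `delete key k` semantics quoted in the comment above.

```rust
use std::collections::{HashMap, HashSet};

/// Hypothetical model of the JournalDB counter semantics: `backing` stands in
/// for the backing key-value database, `counts` for the journal reference counts.
struct Counters {
    backing: HashSet<u64>,
    counts: HashMap<u64, i32>,
}

impl Counters {
    /// insert key k (see the commit_with_counters comment).
    fn insert(&mut self, k: u64) {
        if self.counts.contains_key(&k) {
            // already counting: increment.
            *self.counts.get_mut(&k).unwrap() += 1;
        } else if self.backing.contains(&k) {
            // already in the backing DB: start counting at 1.
            self.counts.insert(k, 1);
        } else {
            // fresh key: write through to the backing DB; count stays 0 (no entry).
            self.backing.insert(k);
        }
    }

    /// delete key k: only fall through to the backing DB once no journal entry references it.
    fn delete(&mut self, k: u64) {
        let n = self.counts.get(&k).copied();
        match n {
            Some(c) if c > 1 => { self.counts.insert(k, c - 1); } // still referenced elsewhere
            Some(_) => { self.counts.remove(&k); }                // last journal reference gone
            None => { self.backing.remove(&k); }                  // unreferenced: purge from backing DB
        }
    }
}

fn main() {
    let mut db = Counters { backing: HashSet::new(), counts: HashMap::new() };
    db.insert(42);                      // fresh: straight into backing, no counter
    db.insert(42);                      // journalled again: counted
    db.delete(42);                      // drops the journal reference...
    assert!(db.backing.contains(&42));  // ...but the original key survives
    db.delete(42);                      // unreferenced now: purged from backing
    assert!(!db.backing.contains(&42));
}
```

This is exactly the fork scenario the new `insert_same_in_fork` test exercises: a key deleted and re-inserted on a non-canonical branch must not take the canonical copy with it when the branch is reverted.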