diff --git a/Cargo.lock b/Cargo.lock index 2ebf9b854..d9e5ced66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -569,6 +569,7 @@ name = "ethkey" version = "0.2.0" dependencies = [ "bigint 0.1.0", + "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -580,6 +581,7 @@ dependencies = [ name = "ethstore" version = "0.1.0" dependencies = [ + "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index d8e42bf3a..112a36312 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,8 @@ ipc = ["ethcore/ipc"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] stratum = ["ipc"] +ethkey-cli = ["ethcore/ethkey-cli"] +ethstore-cli = ["ethcore/ethstore-cli"] [[bin]] path = "parity/main.rs" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 8acba2266..fe6a682cb 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -51,3 +51,5 @@ dev = ["clippy"] default = [] benches = [] ipc = [] +ethkey-cli = ["ethkey/cli"] +ethstore-cli = ["ethstore/cli"] diff --git a/ethcore/build.rs b/ethcore/build.rs index 2e07cbc2f..b83955708 100644 --- a/ethcore/build.rs +++ b/ethcore/build.rs @@ -19,5 +19,6 @@ extern crate ethcore_ipc_codegen; fn main() { ethcore_ipc_codegen::derive_binary("src/types/mod.rs.in").unwrap(); ethcore_ipc_codegen::derive_ipc("src/client/traits.rs").unwrap(); + ethcore_ipc_codegen::derive_ipc("src/snapshot/snapshot_service_trait.rs").unwrap(); ethcore_ipc_codegen::derive_ipc("src/client/chain_notify.rs").unwrap(); } diff --git a/ethcore/src/block_queue.rs 
b/ethcore/src/block_queue.rs index 7d686cec0..c441136fd 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -37,7 +37,7 @@ const MIN_MEM_LIMIT: usize = 16384; const MIN_QUEUE_LIMIT: usize = 512; /// Block queue configuration -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct BlockQueueConfig { /// Maximum number of blocks to keep in unverified queue. /// When the limit is reached, is_full returns true. diff --git a/ethcore/src/blockchain/config.rs b/ethcore/src/blockchain/config.rs index 1a0ab9d42..324474958 100644 --- a/ethcore/src/blockchain/config.rs +++ b/ethcore/src/blockchain/config.rs @@ -17,7 +17,7 @@ //! Blockchain configuration. /// Blockchain configuration. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct Config { /// Preferred cache size in bytes. pub pref_cache_size: usize, diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index e4638f152..f87bcd378 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -20,7 +20,7 @@ use util::H256; /// Represents what has to be handled by actor listening to chain events #[derive(Ipc)] pub trait ChainNotify : Send + Sync { - /// fires when chain has new blocks + /// fires when chain has new blocks. 
fn new_blocks(&self, _imported: Vec, _invalid: Vec, diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 4013a74c2..36da0dcef 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -33,7 +33,7 @@ use util::kvdb::*; use ethkey::recover; use io::*; use views::{BlockView, HeaderView, BodyView}; -use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult}; +use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; use header::BlockNumber; use state::State; use spec::Spec; @@ -123,11 +123,13 @@ impl SleepState { /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. pub struct Client { mode: Mode, - chain: Arc, - tracedb: Arc>, + chain: RwLock>, + tracedb: RwLock>, engine: Arc, - db: Arc, - state_db: Mutex>, + config: ClientConfig, + db: RwLock>, + pruning: journaldb::Algorithm, + state_db: RwLock>, block_queue: BlockQueue, report: RwLock, import_lock: Mutex<()>, @@ -160,17 +162,14 @@ impl Client { path: &Path, miner: Arc, message_channel: IoChannel, + db_config: &DatabaseConfig, ) -> Result, ClientError> { let path = path.to_path_buf(); let gb = spec.genesis_block(); - let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - db_config.cache_size = config.db_cache_size; - db_config.compaction = config.db_compaction.compaction_profile(); - db_config.wal = config.db_wal; let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database))); - let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone())); - let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone()))); + let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); + let tracedb = RwLock::new(try!(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()))); let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); if 
state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) { @@ -185,32 +184,34 @@ impl Client { let engine = spec.engine.clone(); - let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); + let block_queue = BlockQueue::new(config.queue.clone(), engine.clone(), message_channel.clone()); let panic_handler = PanicHandler::new_in_arc(); panic_handler.forward_from(&block_queue); let awake = match config.mode { Mode::Dark(..) => false, _ => true }; let factories = Factories { - vm: EvmFactory::new(config.vm_type), - trie: TrieFactory::new(config.trie_spec), + vm: EvmFactory::new(config.vm_type.clone()), + trie: TrieFactory::new(config.trie_spec.clone()), accountdb: Default::default(), }; let client = Client { sleep_state: Mutex::new(SleepState::new(awake)), liveness: AtomicBool::new(awake), - mode: config.mode, - chain: chain, + mode: config.mode.clone(), + chain: RwLock::new(chain), tracedb: tracedb, engine: engine, - db: db, - state_db: Mutex::new(state_db), + pruning: config.pruning.clone(), + verifier: verification::new(config.verifier_type.clone()), + config: config, + db: RwLock::new(db), + state_db: RwLock::new(state_db), block_queue: block_queue, report: RwLock::new(Default::default()), import_lock: Mutex::new(()), panic_handler: panic_handler, - verifier: verification::new(config.verifier_type), miner: miner, io_channel: message_channel, notify: RwLock::new(Vec::new()), @@ -254,8 +255,9 @@ impl Client { let mut last_hashes = LastHashes::new(); last_hashes.resize(256, H256::default()); last_hashes[0] = parent_hash; + let chain = self.chain.read(); for i in 0..255 { - match self.chain.block_details(&last_hashes[i]) { + match chain.block_details(&last_hashes[i]) { Some(details) => { last_hashes[i + 1] = details.parent.clone(); }, @@ -271,22 +273,23 @@ impl Client { let engine = &*self.engine; let header = &block.header; + let chain = self.chain.read(); // Check the block isn't so old we won't be able to enact it. 
- let best_block_number = self.chain.best_block_number(); + let best_block_number = chain.best_block_number(); if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY { warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); return Err(()); } // Verify Block Family - let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, &*self.chain); + let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, &**chain); if let Err(e) = verify_family_result { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); }; // Check if Parent is in chain - let chain_has_parent = self.chain.block_header(header.parent_hash()); + let chain_has_parent = chain.block_header(header.parent_hash()); if let None = chain_has_parent { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash()); return Err(()); @@ -295,9 +298,9 @@ impl Client { // Enact Verified Block let parent = chain_has_parent.unwrap(); let last_hashes = self.build_last_hashes(header.parent_hash().clone()); - let db = self.state_db.lock().boxed_clone(); + let db = self.state_db.read().boxed_clone(); - let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); + let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); if let Err(e) = enact_result { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); @@ -409,17 +412,18 @@ impl Client { } } - self.db.flush().expect("DB flush failed."); + self.db.read().flush().expect("DB flush failed."); imported } fn 
commit_block(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain { let number = block.header().number(); let parent = block.header().parent_hash().clone(); + let chain = self.chain.read(); // Are we committing an era? let ancient = if number >= HISTORY { let n = number - HISTORY; - Some((n, self.chain.block_hash(n).unwrap())) + Some((n, chain.block_hash(n).unwrap())) } else { None }; @@ -433,14 +437,14 @@ impl Client { //let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); - let mut batch = DBTransaction::new(&self.db); + let mut batch = DBTransaction::new(&self.db.read()); // CHECK! I *think* this is fine, even if the state_root is equal to another // already-imported block of the same number. // TODO: Prove it with a test. block.drain().commit(&mut batch, number, hash, ancient).expect("DB commit failed."); - let route = self.chain.insert_block(&mut batch, block_data, receipts); - self.tracedb.import(&mut batch, TraceImportRequest { + let route = chain.insert_block(&mut batch, block_data, receipts); + self.tracedb.read().import(&mut batch, TraceImportRequest { traces: traces.into(), block_hash: hash.clone(), block_number: number, @@ -448,8 +452,8 @@ impl Client { retracted: route.retracted.len() }); // Final commit to the DB - self.db.write_buffered(batch); - self.chain.commit(); + self.db.read().write_buffered(batch); + chain.commit(); self.update_last_hashes(&parent, hash); route @@ -492,10 +496,10 @@ impl Client { }; self.block_header(id).and_then(|header| { - let db = self.state_db.lock().boxed_clone(); + let db = self.state_db.read().boxed_clone(); // early exit for pruned blocks - if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY { + if db.is_pruned() && self.chain.read().best_block_number() >= block_number + HISTORY { return None; } @@ -523,7 +527,7 @@ impl Client { /// Get a copy of the best block's state. 
pub fn state(&self) -> State { State::from_existing( - self.state_db.lock().boxed_clone(), + self.state_db.read().boxed_clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce(), self.factories.clone()) @@ -532,22 +536,22 @@ impl Client { /// Get info on the cache. pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { - self.chain.cache_size() + self.chain.read().cache_size() } /// Get the report. pub fn report(&self) -> ClientReport { let mut report = self.report.read().clone(); - report.state_db_mem = self.state_db.lock().mem_used(); + report.state_db_mem = self.state_db.read().mem_used(); report } /// Tick the client. // TODO: manage by real events. pub fn tick(&self) { - self.chain.collect_garbage(); + self.chain.read().collect_garbage(); self.block_queue.collect_garbage(); - self.tracedb.collect_garbage(); + self.tracedb.read().collect_garbage(); match self.mode { Mode::Dark(timeout) => { @@ -585,16 +589,16 @@ impl Client { pub fn block_number(&self, id: BlockID) -> Option { match id { BlockID::Number(number) => Some(number), - BlockID::Hash(ref hash) => self.chain.block_number(hash), + BlockID::Hash(ref hash) => self.chain.read().block_number(hash), BlockID::Earliest => Some(0), - BlockID::Latest | BlockID::Pending => Some(self.chain.best_block_number()), + BlockID::Latest | BlockID::Pending => Some(self.chain.read().best_block_number()), } } /// Take a snapshot at the given block. /// If the ID given is "latest", this will default to 1000 blocks behind. 
- pub fn take_snapshot(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), ::error::Error> { - let db = self.state_db.lock().boxed_clone(); + pub fn take_snapshot(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), EthcoreError> { + let db = self.state_db.read().boxed_clone(); let best_block_number = self.chain_info().best_block_number; let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))); @@ -619,7 +623,7 @@ impl Client { }, }; - try!(snapshot::take_snapshot(&self.chain, start_hash, db.as_hashdb(), writer, p)); + try!(snapshot::take_snapshot(&self.chain.read(), start_hash, db.as_hashdb(), writer, p)); Ok(()) } @@ -635,8 +639,8 @@ impl Client { fn transaction_address(&self, id: TransactionID) -> Option { match id { - TransactionID::Hash(ref hash) => self.chain.transaction_address(hash), - TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { + TransactionID::Hash(ref hash) => self.chain.read().transaction_address(hash), + TransactionID::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress { block_hash: hash, index: index, }) @@ -667,6 +671,25 @@ impl Client { } } +impl snapshot::DatabaseRestore for Client { + /// Restart the client with a new backend + fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> { + let _import_lock = self.import_lock.lock(); + let mut state_db = self.state_db.write(); + let mut chain = self.chain.write(); + let mut tracedb = self.tracedb.write(); + self.miner.clear(); + let db = self.db.write(); + try!(db.restore(new_db)); + + *state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE); + *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone())); + *tracedb = try!(TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()).map_err(ClientError::from)); + Ok(()) + } +} + + impl BlockChainClient for Client { fn call(&self, 
t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result { let header = try!(self.block_header(block).ok_or(CallError::StatePruned)); @@ -750,15 +773,17 @@ impl BlockChainClient for Client { } fn best_block_header(&self) -> Bytes { - self.chain.best_block_header() + self.chain.read().best_block_header() } fn block_header(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_header_data(&hash)) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash)) } fn block_body(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_body(&hash)) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash)) } fn block(&self, id: BlockID) -> Option { @@ -767,14 +792,16 @@ impl BlockChainClient for Client { return Some(block.rlp_bytes(Seal::Without)); } } - Self::block_hash(&self.chain, id).and_then(|hash| { - self.chain.block(&hash) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| { + chain.block(&hash) }) } fn block_status(&self, id: BlockID) -> BlockStatus { - match Self::block_hash(&self.chain, id) { - Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, + let chain = self.chain.read(); + match Self::block_hash(&chain, id) { + Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, Some(hash) => self.block_queue.block_status(&hash), None => BlockStatus::Unknown } @@ -786,7 +813,8 @@ impl BlockChainClient for Client { return Some(*block.header.difficulty() + self.block_total_difficulty(BlockID::Latest).expect("blocks in chain have details; qed")); } } - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) } fn nonce(&self, 
address: &Address, id: BlockID) -> Option { @@ -794,7 +822,8 @@ impl BlockChainClient for Client { } fn block_hash(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id) + let chain = self.chain.read(); + Self::block_hash(&chain, id) } fn code(&self, address: &Address, id: BlockID) -> Option> { @@ -810,7 +839,7 @@ impl BlockChainClient for Client { } fn transaction(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| self.chain.transaction(&address)) + self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address)) } fn uncle(&self, id: UncleID) -> Option { @@ -819,11 +848,12 @@ impl BlockChainClient for Client { } fn transaction_receipt(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| self.chain.block_number(&address.block_hash).and_then(|block_number| { - let t = self.chain.block_body(&address.block_hash) + let chain = self.chain.read(); + self.transaction_address(id).and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| { + let t = chain.block_body(&address.block_hash) .and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index)); - match (t, self.chain.transaction_receipt(&address)) { + match (t, chain.transaction_receipt(&address)) { (Some(tx), Some(receipt)) => { let block_hash = tx.block_hash.clone(); let block_number = tx.block_number.clone(); @@ -833,7 +863,7 @@ impl BlockChainClient for Client { 0 => U256::zero(), i => { let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 }; - let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); + let prior_receipt = chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); prior_receipt.gas_used 
} }; @@ -864,28 +894,29 @@ impl BlockChainClient for Client { } fn tree_route(&self, from: &H256, to: &H256) -> Option { - match self.chain.is_known(from) && self.chain.is_known(to) { - true => Some(self.chain.tree_route(from.clone(), to.clone())), + let chain = self.chain.read(); + match chain.is_known(from) && chain.is_known(to) { + true => Some(chain.tree_route(from.clone(), to.clone())), false => None } } fn find_uncles(&self, hash: &H256) -> Option> { - self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age()) + self.chain.read().find_uncle_hashes(hash, self.engine.maximum_uncle_age()) } fn state_data(&self, hash: &H256) -> Option { - self.state_db.lock().state(hash) + self.state_db.read().state(hash) } fn block_receipts(&self, hash: &H256) -> Option { - self.chain.block_receipts(hash).map(|receipts| ::rlp::encode(&receipts).to_vec()) + self.chain.read().block_receipts(hash).map(|receipts| ::rlp::encode(&receipts).to_vec()) } fn import_block(&self, bytes: Bytes) -> Result { { let header = BlockView::new(&bytes).header_view(); - if self.chain.is_known(&header.sha3()) { + if self.chain.read().is_known(&header.sha3()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { @@ -904,12 +935,13 @@ impl BlockChainClient for Client { } fn chain_info(&self) -> BlockChainInfo { + let chain = self.chain.read(); BlockChainInfo { - total_difficulty: self.chain.best_block_total_difficulty(), - pending_total_difficulty: self.chain.best_block_total_difficulty(), - genesis_hash: self.chain.genesis_hash(), - best_block_hash: self.chain.best_block_hash(), - best_block_number: From::from(self.chain.best_block_number()) + total_difficulty: chain.best_block_total_difficulty(), + pending_total_difficulty: chain.best_block_total_difficulty(), + genesis_hash: chain.genesis_hash(), + best_block_hash: chain.best_block_hash(), + best_block_number: 
From::from(chain.best_block_number()) } } @@ -919,7 +951,7 @@ impl BlockChainClient for Client { fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option> { match (self.block_number(from_block), self.block_number(to_block)) { - (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)), + (Some(from), Some(to)) => Some(self.chain.read().blocks_with_bloom(bloom, from, to)), _ => None } } @@ -937,10 +969,11 @@ impl BlockChainClient for Client { blocks.sort(); + let chain = self.chain.read(); blocks.into_iter() - .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) + .filter_map(|number| chain.block_hash(number).map(|hash| (number, hash))) + .filter_map(|(number, hash)| chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) .flat_map(|(number, hash, receipts, hashes)| { let mut log_index = 0; receipts.into_iter() @@ -976,7 +1009,7 @@ impl BlockChainClient for Client { to_address: From::from(filter.to_address), }; - let traces = self.tracedb.filter(&filter); + let traces = self.tracedb.read().filter(&filter); Some(traces) } else { None @@ -988,7 +1021,7 @@ impl BlockChainClient for Client { self.transaction_address(trace.transaction) .and_then(|tx_address| { self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) + .and_then(|number| self.tracedb.read().trace(number, tx_address.index, trace_address)) }) } @@ -996,17 +1029,17 @@ impl BlockChainClient for Client { self.transaction_address(transaction) 
.and_then(|tx_address| { self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) + .and_then(|number| self.tracedb.read().transaction_traces(number, tx_address.index)) }) } fn block_traces(&self, block: BlockID) -> Option> { self.block_number(block) - .and_then(|number| self.tracedb.block_traces(number)) + .and_then(|number| self.tracedb.read().block_traces(number)) } fn last_hashes(&self) -> LastHashes { - (*self.build_last_hashes(self.chain.best_block_hash())).clone() + (*self.build_last_hashes(self.chain.read().best_block_hash())).clone() } fn queue_transactions(&self, transactions: Vec) { @@ -1048,14 +1081,15 @@ impl BlockChainClient for Client { impl MiningBlockChainClient for Client { fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { let engine = &*self.engine; - let h = self.chain.best_block_hash(); + let chain = self.chain.read(); + let h = chain.best_block_hash(); let mut open_block = OpenBlock::new( engine, self.factories.clone(), false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. 
- self.state_db.lock().boxed_clone(), - &self.chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"), + self.state_db.read().boxed_clone(), + &chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"), self.build_last_hashes(h.clone()), author, gas_range_target, @@ -1063,7 +1097,7 @@ impl MiningBlockChainClient for Client { ).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed"); // Add uncles - self.chain + chain .find_uncle_headers(&h, engine.maximum_uncle_age()) .unwrap() .into_iter() @@ -1104,7 +1138,7 @@ impl MiningBlockChainClient for Client { precise_time_ns() - start, ); }); - self.db.flush().expect("DB flush failed."); + self.db.read().flush().expect("DB flush failed."); Ok(h) } } diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 504ca4de7..bb70de6cd 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -62,7 +62,7 @@ impl FromStr for DatabaseCompactionProfile { } /// Operating mode for the client. -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq, Clone)] pub enum Mode { /// Always on. Active, diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index eef3df6b1..18dfeec46 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -187,7 +187,10 @@ mod tests { use spec::Spec; /// Create a new test chain spec with `BasicAuthority` consensus engine. 
- fn new_test_authority() -> Spec { Spec::load(include_bytes!("../../res/test_authority.json")) } + fn new_test_authority() -> Spec { + let bytes: &[u8] = include_bytes!("../../res/test_authority.json"); + Spec::load(bytes).expect("invalid chain spec") + } #[test] fn has_valid_metadata() { diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index bdb882ee7..3c95f3465 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -72,7 +72,10 @@ mod tests { use block::*; /// Create a new test chain spec with `BasicAuthority` consensus engine. - fn new_test_instant() -> Spec { Spec::load(include_bytes!("../../res/instant_seal.json")) } + fn new_test_instant() -> Spec { + let bytes: &[u8] = include_bytes!("../../res/instant_seal.json"); + Spec::load(bytes).expect("invalid chain spec") + } #[test] fn instant_can_seal() { diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 1efe001e5..6d46d5551 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -29,29 +29,33 @@ pub use self::denominations::*; use super::spec::*; +fn load(b: &[u8]) -> Spec { + Spec::load(b).expect("chain spec is invalid") +} + /// Create a new Olympic chain spec. -pub fn new_olympic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/olympic.json")) } +pub fn new_olympic() -> Spec { load(include_bytes!("../../res/ethereum/olympic.json")) } /// Create a new Frontier mainnet chain spec. -pub fn new_frontier() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier.json")) } +pub fn new_frontier() -> Spec { load(include_bytes!("../../res/ethereum/frontier.json")) } /// Create a new Frontier mainnet chain spec without the DAO hardfork. 
-pub fn new_classic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/classic.json")) } +pub fn new_classic() -> Spec { load(include_bytes!("../../res/ethereum/classic.json")) } /// Create a new Frontier chain spec as though it never changes to Homestead. -pub fn new_frontier_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier_test.json")) } +pub fn new_frontier_test() -> Spec { load(include_bytes!("../../res/ethereum/frontier_test.json")) } /// Create a new Homestead chain spec as though it never changed from Frontier. -pub fn new_homestead_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/homestead_test.json")) } +pub fn new_homestead_test() -> Spec { load(include_bytes!("../../res/ethereum/homestead_test.json")) } /// Create a new Frontier/Homestead/DAO chain spec with transition points at #5 and #8. -pub fn new_daohardfork_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/daohardfork_test.json")) } +pub fn new_daohardfork_test() -> Spec { load(include_bytes!("../../res/ethereum/daohardfork_test.json")) } /// Create a new Frontier main net chain spec without genesis accounts. -pub fn new_mainnet_like() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier_like_test.json")) } +pub fn new_mainnet_like() -> Spec { load(include_bytes!("../../res/ethereum/frontier_like_test.json")) } /// Create a new Morden chain spec. 
-pub fn new_morden() -> Spec { Spec::load(include_bytes!("../../res/ethereum/morden.json")) } +pub fn new_morden() -> Spec { load(include_bytes!("../../res/ethereum/morden.json")) } #[cfg(test)] mod tests { diff --git a/ethcore/src/json_tests/chain.rs b/ethcore/src/json_tests/chain.rs index 16161e158..93b0cf82c 100644 --- a/ethcore/src/json_tests/chain.rs +++ b/ethcore/src/json_tests/chain.rs @@ -58,12 +58,14 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec { let temp = RandomTempPath::new(); { + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); let client = Client::new( ClientConfig::default(), &spec, temp.as_path(), Arc::new(Miner::with_spec(&spec)), - IoChannel::disconnected() + IoChannel::disconnected(), + &db_config, ).unwrap(); for b in &blockchain.blocks_rlp() { if Block::is_good(&b) { diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index a2533ecde..c9d60f075 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -227,6 +227,11 @@ impl Miner { self.options.force_sealing || !self.options.new_work_notify.is_empty() } + /// Clear all pending block states + pub fn clear(&self) { + self.sealing_work.lock().queue.reset(); + } + /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. 
pub fn pending_state(&self) -> Option { self.sealing_work.lock().queue.peek_last_ref().map(|b| b.block().fields().state.clone()) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index e2e4772a4..a2b483d40 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -23,7 +23,7 @@ use error::*; use client::{Client, ClientConfig, ChainNotify}; use miner::Miner; use snapshot::ManifestData; -use snapshot::service::Service as SnapshotService; +use snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams}; use std::sync::atomic::AtomicBool; #[cfg(feature="ipc")] @@ -46,6 +46,8 @@ pub enum ClientIoMessage { FeedStateChunk(H256, Bytes), /// Feed a block chunk to the snapshot service FeedBlockChunk(H256, Bytes), + /// Take a snapshot for the block with given number. + TakeSnapshot(u64), } /// Client service setup. Creates and registers client and network services with the IO subsystem. @@ -58,11 +60,12 @@ pub struct ClientService { } impl ClientService { - /// Start the service in a separate thread. + /// Start the `ClientService`. pub fn start( config: ClientConfig, spec: &Spec, - db_path: &Path, + client_path: &Path, + snapshot_path: &Path, ipc_path: &Path, miner: Arc, ) -> Result @@ -76,11 +79,25 @@ impl ClientService { warn!("Your chain is an alternative fork. 
{}", Colour::Red.bold().paint("TRANSACTIONS MAY BE REPLAYED ON THE MAINNET!")); } - let pruning = config.pruning; - let client = try!(Client::new(config, &spec, db_path, miner, io_service.channel())); - let snapshot = try!(SnapshotService::new(spec, pruning, db_path.into(), io_service.channel())); + let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + db_config.cache_size = config.db_cache_size; + db_config.compaction = config.db_compaction.compaction_profile(); + db_config.wal = config.db_wal; - let snapshot = Arc::new(snapshot); + let pruning = config.pruning; + let client = try!(Client::new(config, &spec, client_path, miner, io_service.channel(), &db_config)); + + let snapshot_params = SnapServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + db_config: db_config, + pruning: pruning, + channel: io_service.channel(), + snapshot_root: snapshot_path.into(), + client_db: client_path.into(), + db_restore: client.clone(), + }; + let snapshot = Arc::new(try!(SnapshotService::new(snapshot_params))); panic_handler.forward_from(&*client); let client_io = Arc::new(ClientIoHandler { @@ -90,7 +107,7 @@ impl ClientService { try!(io_service.register_handler(client_io)); let stop_guard = ::devtools::StopGuard::new(); - run_ipc(ipc_path, client.clone(), stop_guard.share()); + run_ipc(ipc_path, client.clone(), snapshot.clone(), stop_guard.share()); Ok(ClientService { io_service: Arc::new(io_service), @@ -145,16 +162,22 @@ struct ClientIoHandler { } const CLIENT_TICK_TIMER: TimerToken = 0; +const SNAPSHOT_TICK_TIMER: TimerToken = 1; + const CLIENT_TICK_MS: u64 = 5000; +const SNAPSHOT_TICK_MS: u64 = 10000; impl IoHandler for ClientIoHandler { fn initialize(&self, io: &IoContext) { io.register_timer(CLIENT_TICK_TIMER, CLIENT_TICK_MS).expect("Error registering client timer"); + io.register_timer(SNAPSHOT_TICK_TIMER, SNAPSHOT_TICK_MS).expect("Error registering snapshot timer"); } fn timeout(&self, _io: &IoContext, timer: TimerToken) 
{ - if timer == CLIENT_TICK_TIMER { - self.client.tick(); + match timer { + CLIENT_TICK_TIMER => self.client.tick(), + SNAPSHOT_TICK_TIMER => self.snapshot.tick(), + _ => warn!("IO service triggered unregistered timer '{}'", timer), } } @@ -170,20 +193,38 @@ impl IoHandler for ClientIoHandler { } ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => self.snapshot.feed_state_chunk(*hash, chunk), ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => self.snapshot.feed_block_chunk(*hash, chunk), + ClientIoMessage::TakeSnapshot(num) => { + if let Err(e) = self.snapshot.take_snapshot(&*self.client, num) { + warn!("Failed to take snapshot at block #{}: {}", num, e); + } + } _ => {} // ignore other messages } } } #[cfg(feature="ipc")] -fn run_ipc(base_path: &Path, client: Arc, stop: Arc) { +fn run_ipc(base_path: &Path, client: Arc, snapshot_service: Arc, stop: Arc) { let mut path = base_path.to_owned(); path.push("parity-chain.ipc"); let socket_addr = format!("ipc://{}", path.to_string_lossy()); + let s = stop.clone(); ::std::thread::spawn(move || { let mut worker = nanoipc::Worker::new(&(client as Arc)); worker.add_reqrep(&socket_addr).expect("Ipc expected to initialize with no issues"); + while !s.load(::std::sync::atomic::Ordering::Relaxed) { + worker.poll(); + } + }); + + let mut path = base_path.to_owned(); + path.push("parity-snapshot.ipc"); + let socket_addr = format!("ipc://{}", path.to_string_lossy()); + ::std::thread::spawn(move || { + let mut worker = nanoipc::Worker::new(&(snapshot_service as Arc<::snapshot::SnapshotService>)); + worker.add_reqrep(&socket_addr).expect("Ipc expected to initialize with no issues"); + while !stop.load(::std::sync::atomic::Ordering::Relaxed) { worker.poll(); } @@ -191,7 +232,7 @@ fn run_ipc(base_path: &Path, client: Arc, stop: Arc) { } #[cfg(not(feature="ipc"))] -fn run_ipc(_base_path: &Path, _client: Arc, _stop: Arc) { +fn run_ipc(_base_path: &Path, _client: Arc, _snapshot_service: Arc, _stop: Arc) { } #[cfg(test)] @@ -206,15 
+247,25 @@ mod tests { #[test] fn it_can_be_started() { let temp_path = RandomTempPath::new(); - let mut path = temp_path.as_path().to_owned(); - path.push("pruning"); - path.push("db"); + let path = temp_path.as_path().to_owned(); + let client_path = { + let mut path = path.to_owned(); + path.push("client"); + path + }; + + let snapshot_path = { + let mut path = path.to_owned(); + path.push("snapshot"); + path + }; let spec = get_test_spec(); let service = ClientService::start( ClientConfig::default(), &spec, - &path, + &client_path, + &snapshot_path, &path, Arc::new(Miner::with_spec(&spec)), ); diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 2a81b967d..43622fc51 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -32,9 +32,9 @@ use util::Mutex; use util::hash::{FixedHash, H256}; use util::journaldb::{self, Algorithm, JournalDB}; use util::kvdb::Database; -use util::sha3::SHA3_NULL_RLP; use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut}; -use rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; +use util::sha3::SHA3_NULL_RLP; +use rlp::{RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; use self::account::Account; use self::block::AbridgedBlock; @@ -44,7 +44,12 @@ use crossbeam::{scope, ScopedJoinHandle}; use rand::{Rng, OsRng}; pub use self::error::Error; -pub use self::service::{RestorationStatus, Service, SnapshotService}; + +pub use self::service::{Service, DatabaseRestore}; +pub use self::traits::{SnapshotService, RemoteSnapshotService}; +pub use self::watcher::Watcher; +pub use types::snapshot_manifest::ManifestData; +pub use types::restoration_status::RestorationStatus; pub mod io; pub mod service; @@ -52,10 +57,16 @@ pub mod service; mod account; mod block; mod error; +mod watcher; #[cfg(test)] mod tests; +mod traits { + #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues + include!(concat!(env!("OUT_DIR"), 
"/snapshot_service_trait.rs")); +} + // Try to have chunks be around 4MB (before compression) const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; @@ -72,17 +83,28 @@ pub struct Progress { } impl Progress { + /// Reset the progress. + pub fn reset(&self) { + self.accounts.store(0, Ordering::Release); + self.blocks.store(0, Ordering::Release); + self.size.store(0, Ordering::Release); + + // atomic fence here to ensure the others are written first? + // logs might very rarely get polluted if not. + self.done.store(false, Ordering::Release); + } + /// Get the number of accounts snapshotted thus far. - pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Relaxed) } + pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Acquire) } /// Get the number of blocks snapshotted thus far. - pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Relaxed) } + pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Acquire) } /// Get the written size of the snapshot in bytes. - pub fn size(&self) -> usize { self.size.load(Ordering::Relaxed) } + pub fn size(&self) -> usize { self.size.load(Ordering::Acquire) } /// Whether the snapshot is complete. - pub fn done(&self) -> bool { self.done.load(Ordering::SeqCst) } + pub fn done(&self) -> bool { self.done.load(Ordering::Acquire) } } /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer. @@ -354,54 +376,6 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex, - /// List of block chunk hashes. - pub block_hashes: Vec, - /// The final, expected state root. - pub state_root: H256, - /// Block number this snapshot was taken at. - pub block_number: u64, - /// Block hash this snapshot was taken at. - pub block_hash: H256, -} - -impl ManifestData { - /// Encode the manifest data to rlp. 
- pub fn into_rlp(self) -> Bytes { - let mut stream = RlpStream::new_list(5); - stream.append(&self.state_hashes); - stream.append(&self.block_hashes); - stream.append(&self.state_root); - stream.append(&self.block_number); - stream.append(&self.block_hash); - - stream.out() - } - - /// Try to restore manifest data from raw bytes, interpreted as RLP. - pub fn from_rlp(raw: &[u8]) -> Result { - let decoder = UntrustedRlp::new(raw); - - let state_hashes: Vec = try!(decoder.val_at(0)); - let block_hashes: Vec = try!(decoder.val_at(1)); - let state_root: H256 = try!(decoder.val_at(2)); - let block_number: u64 = try!(decoder.val_at(3)); - let block_hash: H256 = try!(decoder.val_at(4)); - - Ok(ManifestData { - state_hashes: state_hashes, - block_hashes: block_hashes, - state_root: state_root, - block_number: block_number, - block_hash: block_hash, - }) - } -} - /// Used to rebuild the state trie piece by piece. pub struct StateRebuilder { db: Box, @@ -653,4 +627,4 @@ impl BlockRebuilder { } } } -} \ No newline at end of file +} diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 40f629ad9..2a186378f 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -19,18 +19,19 @@ use std::collections::HashSet; use std::io::ErrorKind; use std::fs; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use super::{ManifestData, StateRebuilder, BlockRebuilder}; +use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, SnapshotService}; use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::BlockChain; +use client::Client; use engines::Engine; use error::Error; +use ids::BlockID; use service::ClientIoMessage; -use spec::Spec; use io::IoChannel; @@ -39,51 +40,27 @@ use util::journaldb::Algorithm; use util::kvdb::{Database, 
DatabaseConfig}; use util::snappy; -/// Statuses for restorations. -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum RestorationStatus { - /// No restoration. - Inactive, - /// Ongoing restoration. - Ongoing, - /// Failed restoration. - Failed, +/// Helper for removing directories in case of error. +struct Guard(bool, PathBuf); + +impl Guard { + fn new(path: PathBuf) -> Self { Guard(true, path) } + + fn disarm(mut self) { self.0 = false } } -/// The interface for a snapshot network service. -/// This handles: -/// - restoration of snapshots to temporary databases. -/// - responding to queries for snapshot manifests and chunks -pub trait SnapshotService { - /// Query the most recent manifest data. - fn manifest(&self) -> Option; +impl Drop for Guard { + fn drop(&mut self) { + if self.0 { + let _ = fs::remove_dir_all(&self.1); + } + } +} - /// Get raw chunk for a given hash. - fn chunk(&self, hash: H256) -> Option; - - /// Ask the snapshot service for the restoration status. - fn status(&self) -> RestorationStatus; - - /// Ask the snapshot service for the number of chunks completed. - /// Return a tuple of (state_chunks, block_chunks). - /// Undefined when not restoring. - fn chunks_done(&self) -> (usize, usize); - - /// Begin snapshot restoration. - /// If restoration in-progress, this will reset it. - /// From this point on, any previous snapshot may become unavailable. - fn begin_restore(&self, manifest: ManifestData); - - /// Abort an in-progress restoration if there is one. - fn abort_restore(&self); - - /// Feed a raw state chunk to the service to be processed asynchronously. - /// no-op if not currently restoring. - fn restore_state_chunk(&self, hash: H256, chunk: Bytes); - - /// Feed a raw block chunk to the service to be processed asynchronously. - /// no-op if currently restoring. 
- fn restore_block_chunk(&self, hash: H256, chunk: Bytes); +/// External database restoration handler +pub trait DatabaseRestore: Send + Sync { + /// Restart with a new backend. Takes ownership of passed database and moves it to a new location. + fn restore_db(&self, new_db: &str) -> Result<(), Error>; } /// State restoration manager. @@ -96,14 +73,17 @@ struct Restoration { writer: LooseWriter, snappy_buffer: Bytes, final_state_root: H256, + guard: Guard, } struct RestorationParams<'a> { manifest: ManifestData, // manifest to base restoration on. pruning: Algorithm, // pruning algorithm for the database. db_path: PathBuf, // database path + db_config: &'a DatabaseConfig, writer: LooseWriter, // writer for recovered snapshot. genesis: &'a [u8], // genesis block of the chain. + guard: Guard, // guard for the restoration directory. } impl Restoration { @@ -114,8 +94,7 @@ impl Restoration { let state_chunks = manifest.state_hashes.iter().cloned().collect(); let block_chunks = manifest.block_hashes.iter().cloned().collect(); - let cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let raw_db = Arc::new(try!(Database::open(&cfg, &*params.db_path.to_string_lossy()) + let raw_db = Arc::new(try!(Database::open(params.db_config, &*params.db_path.to_string_lossy()) .map_err(UtilError::SimpleString))); let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone()); @@ -131,6 +110,7 @@ impl Restoration { writer: params.writer, snappy_buffer: Vec::new(), final_state_root: root, + guard: params.guard, }) } @@ -179,6 +159,7 @@ impl Restoration { try!(self.writer.finish(self.manifest)); + self.guard.disarm(); Ok(()) } @@ -191,15 +172,35 @@ impl Restoration { /// Type alias for client io channel. pub type Channel = IoChannel; -/// Service implementation. -/// -/// This will replace the client's state DB as soon as the last state chunk -/// is fed, and will replace the client's blocks DB when the last block chunk -/// is fed. +/// Snapshot service parameters. 
+pub struct ServiceParams { + /// The consensus engine this is built on. + pub engine: Arc, + /// The chain's genesis block. + pub genesis_block: Bytes, + /// Database configuration options. + pub db_config: DatabaseConfig, + /// State pruning algorithm. + pub pruning: Algorithm, + /// Async IO channel for sending messages. + pub channel: Channel, + /// The directory to put snapshots in. + /// Usually "/snapshot" + pub snapshot_root: PathBuf, + /// The client's database directory. + /// Usually "//db". + pub client_db: PathBuf, + /// A handle for database restoration. + pub db_restore: Arc, +} + +/// `SnapshotService` implementation. +/// This controls taking snapshots and restoring from them. pub struct Service { restoration: Mutex>, - client_db: PathBuf, // "//db" - db_path: PathBuf, // "/" + client_db: PathBuf, + snapshot_root: PathBuf, + db_config: DatabaseConfig, io_channel: Channel, pruning: Algorithm, status: Mutex, @@ -208,38 +209,34 @@ pub struct Service { genesis_block: Bytes, state_chunks: AtomicUsize, block_chunks: AtomicUsize, + db_restore: Arc, + progress: super::Progress, + taking_snapshot: AtomicBool, } impl Service { - /// Create a new snapshot service. - pub fn new(spec: &Spec, pruning: Algorithm, client_db: PathBuf, io_channel: Channel) -> Result { - let db_path = try!(client_db.parent().and_then(Path::parent) - .ok_or_else(|| UtilError::SimpleString("Failed to find database root.".into()))).to_owned(); - - let reader = { - let mut snapshot_path = db_path.clone(); - snapshot_path.push("snapshot"); - snapshot_path.push("current"); - - LooseReader::new(snapshot_path).ok() - }; - - let service = Service { + /// Create a new snapshot service from the given parameters. 
+ pub fn new(params: ServiceParams) -> Result { + let mut service = Service { restoration: Mutex::new(None), - client_db: client_db, - db_path: db_path, - io_channel: io_channel, - pruning: pruning, + client_db: params.client_db, + snapshot_root: params.snapshot_root, + db_config: params.db_config, + io_channel: params.channel, + pruning: params.pruning, status: Mutex::new(RestorationStatus::Inactive), - reader: RwLock::new(reader), - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), + reader: RwLock::new(None), + engine: params.engine, + genesis_block: params.genesis_block, state_chunks: AtomicUsize::new(0), block_chunks: AtomicUsize::new(0), + db_restore: params.db_restore, + progress: Default::default(), + taking_snapshot: AtomicBool::new(false), }; // create the root snapshot dir if it doesn't exist. - if let Err(e) = fs::create_dir_all(service.root_dir()) { + if let Err(e) = fs::create_dir_all(&service.snapshot_root) { if e.kind() != ErrorKind::AlreadyExists { return Err(e.into()) } @@ -252,26 +249,36 @@ impl Service { } } - Ok(service) - } + // delete the temporary snapshot dir if it does exist. + if let Err(e) = fs::remove_dir_all(service.temp_snapshot_dir()) { + if e.kind() != ErrorKind::NotFound { + return Err(e.into()) + } + } - // get the root path. - fn root_dir(&self) -> PathBuf { - let mut dir = self.db_path.clone(); - dir.push("snapshot"); - dir + let reader = LooseReader::new(service.snapshot_dir()).ok(); + *service.reader.get_mut() = reader; + + Ok(service) } // get the current snapshot dir. fn snapshot_dir(&self) -> PathBuf { - let mut dir = self.root_dir(); + let mut dir = self.snapshot_root.clone(); dir.push("current"); dir } + // get the temporary snapshot dir. + fn temp_snapshot_dir(&self) -> PathBuf { + let mut dir = self.snapshot_root.clone(); + dir.push("in_progress"); + dir + } + // get the restoration directory. 
fn restoration_dir(&self) -> PathBuf { - let mut dir = self.root_dir(); + let mut dir = self.snapshot_root.clone(); dir.push("restoration"); dir } @@ -295,37 +302,58 @@ impl Service { let our_db = self.restoration_db(); trace!(target: "snapshot", "replacing {:?} with {:?}", self.client_db, our_db); + try!(self.db_restore.restore_db(our_db.to_str().unwrap())); + Ok(()) + } - let mut backup_db = self.restoration_dir(); - backup_db.push("backup_db"); + /// Tick the snapshot service. This will log any active snapshot + /// being taken. + pub fn tick(&self) { + if self.progress.done() || !self.taking_snapshot.load(Ordering::SeqCst) { return } - let _ = fs::remove_dir_all(&backup_db); + let p = &self.progress; + info!("Snapshot: {} accounts {} blocks {} bytes", p.accounts(), p.blocks(), p.size()); + } - let existed = match fs::rename(&self.client_db, &backup_db) { - Ok(_) => true, - Err(e) => if let ErrorKind::NotFound = e.kind() { - false - } else { - return Err(e.into()); - } - }; - - match fs::rename(&our_db, &self.client_db) { - Ok(_) => { - // clean up the backup. - if existed { - try!(fs::remove_dir_all(&backup_db)); - } - Ok(()) - } - Err(e) => { - // restore the backup. - if existed { - try!(fs::rename(&backup_db, &self.client_db)); - } - Err(e.into()) - } + /// Take a snapshot at the block with the given number. + /// calling this while a restoration is in progress or vice versa + /// will lead to a race condition where the first one to finish will + /// have their produced snapshot overwritten. 
+ pub fn take_snapshot(&self, client: &Client, num: u64) -> Result<(), Error> { + if self.taking_snapshot.compare_and_swap(false, true, Ordering::SeqCst) { + info!("Skipping snapshot at #{} as another one is currently in-progress.", num); + return Ok(()); } + + info!("Taking snapshot at #{}", num); + self.progress.reset(); + + let temp_dir = self.temp_snapshot_dir(); + let snapshot_dir = self.snapshot_dir(); + + let _ = fs::remove_dir_all(&temp_dir); + + let writer = try!(LooseWriter::new(temp_dir.clone())); + + let guard = Guard::new(temp_dir.clone()); + let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress); + + self.taking_snapshot.store(false, Ordering::SeqCst); + try!(res); + + info!("Finished taking snapshot at #{}", num); + + let mut reader = self.reader.write(); + + // destroy the old snapshot reader. + *reader = None; + + try!(fs::rename(temp_dir, &snapshot_dir)); + + *reader = Some(try!(LooseReader::new(snapshot_dir))); + + guard.disarm(); + Ok(()) } /// Initialize the restoration synchronously. 
@@ -354,13 +382,18 @@ impl Service { manifest: manifest, pruning: self.pruning, db_path: self.restoration_db(), + db_config: &self.db_config, writer: writer, genesis: &self.genesis_block, + guard: Guard::new(rest_dir), }; *res = Some(try!(Restoration::new(params))); - *self.status.lock() = RestorationStatus::Ongoing; + *self.status.lock() = RestorationStatus::Ongoing { + state_chunks_done: self.state_chunks.load(Ordering::Relaxed) as u32, + block_chunks_done: self.block_chunks.load(Ordering::Relaxed) as u32, + }; Ok(()) } @@ -393,14 +426,7 @@ impl Service { try!(fs::create_dir(&snapshot_dir)); trace!(target: "snapshot", "copying restored snapshot files over"); - for maybe_file in try!(fs::read_dir(self.temp_recovery_dir())) { - let path = try!(maybe_file).path(); - if let Some(name) = path.file_name().map(|x| x.to_owned()) { - let mut new_path = snapshot_dir.clone(); - new_path.push(name); - try!(fs::rename(path, new_path)); - } - } + try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir)); let _ = fs::remove_dir_all(self.restoration_dir()); @@ -418,7 +444,7 @@ impl Service { match self.status() { RestorationStatus::Inactive | RestorationStatus::Failed => Ok(()), - RestorationStatus::Ongoing => { + RestorationStatus::Ongoing { .. 
} => { let res = { let rest = match *restoration { Some(ref mut r) => r, @@ -489,10 +515,6 @@ impl SnapshotService for Service { *self.status.lock() } - fn chunks_done(&self) -> (usize, usize) { - (self.state_chunks.load(Ordering::Relaxed), self.block_chunks.load(Ordering::Relaxed)) - } - fn begin_restore(&self, manifest: ManifestData) { self.io_channel.send(ClientIoMessage::BeginRestoration(manifest)) .expect("snapshot service and io service are kept alive by client service; qed"); @@ -520,37 +542,58 @@ impl SnapshotService for Service { } } +impl Drop for Service { + fn drop(&mut self) { + self.abort_restore(); + } +} + #[cfg(test)] mod tests { + use std::sync::Arc; use service::ClientIoMessage; use io::{IoService}; use devtools::RandomTempPath; use tests::helpers::get_test_spec; use util::journaldb::Algorithm; - - use snapshot::ManifestData; + use error::Error; + use snapshot::{ManifestData, RestorationStatus, SnapshotService}; use super::*; + struct NoopDBRestore; + impl DatabaseRestore for NoopDBRestore { + fn restore_db(&self, _new_db: &str) -> Result<(), Error> { + Ok(()) + } + } + #[test] fn sends_async_messages() { let service = IoService::::start().unwrap(); + let spec = get_test_spec(); let dir = RandomTempPath::new(); let mut dir = dir.as_path().to_owned(); - dir.push("pruning"); - dir.push("db"); + let mut client_db = dir.clone(); + dir.push("snapshot"); + client_db.push("client"); - let service = Service::new( - &get_test_spec(), - Algorithm::Archive, - dir, - service.channel() - ).unwrap(); + let snapshot_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + db_config: Default::default(), + pruning: Algorithm::Archive, + channel: service.channel(), + snapshot_root: dir, + client_db: client_db, + db_restore: Arc::new(NoopDBRestore), + }; + + let service = Service::new(snapshot_params).unwrap(); assert!(service.manifest().is_none()); assert!(service.chunk(Default::default()).is_none()); 
assert_eq!(service.status(), RestorationStatus::Inactive); - assert_eq!(service.chunks_done(), (0, 0)); let manifest = ManifestData { state_hashes: vec![], diff --git a/ethcore/src/snapshot/snapshot_service_trait.rs b/ethcore/src/snapshot/snapshot_service_trait.rs new file mode 100644 index 000000000..7df90c943 --- /dev/null +++ b/ethcore/src/snapshot/snapshot_service_trait.rs @@ -0,0 +1,54 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use super::{ManifestData, RestorationStatus}; +use util::{Bytes, H256}; +use ipc::IpcConfig; + +/// The interface for a snapshot network service. +/// This handles: +/// - restoration of snapshots to temporary databases. +/// - responding to queries for snapshot manifests and chunks +#[derive(Ipc)] +#[ipc(client_ident="RemoteSnapshotService")] +pub trait SnapshotService : Sync + Send { + /// Query the most recent manifest data. + fn manifest(&self) -> Option; + + /// Get raw chunk for a given hash. + fn chunk(&self, hash: H256) -> Option; + + /// Ask the snapshot service for the restoration status. + fn status(&self) -> RestorationStatus; + + /// Begin snapshot restoration. + /// If restoration in-progress, this will reset it. + /// From this point on, any previous snapshot may become unavailable. + fn begin_restore(&self, manifest: ManifestData); + + /// Abort an in-progress restoration if there is one. 
+ fn abort_restore(&self); + + /// Feed a raw state chunk to the service to be processed asynchronously. + /// no-op if not currently restoring. + fn restore_state_chunk(&self, hash: H256, chunk: Bytes); + + /// Feed a raw block chunk to the service to be processed asynchronously. + /// no-op if currently restoring. + fn restore_block_chunk(&self, hash: H256, chunk: Bytes); +} + +impl IpcConfig for SnapshotService { } diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs new file mode 100644 index 000000000..65f47efc8 --- /dev/null +++ b/ethcore/src/snapshot/watcher.rs @@ -0,0 +1,203 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Watcher for snapshot-related chain events. + +use client::{BlockChainClient, Client, ChainNotify}; +use ids::BlockID; +use service::ClientIoMessage; +use views::HeaderView; + +use io::IoChannel; +use util::hash::H256; + +use std::sync::Arc; + +// helper trait for transforming hashes to numbers and checking if syncing. 
+trait Oracle: Send + Sync { + fn to_number(&self, hash: H256) -> Option; + + fn is_major_syncing(&self) -> bool; +} + +struct StandardOracle where F: 'static + Send + Sync + Fn() -> bool { + client: Arc, + sync_status: F, +} + +impl Oracle for StandardOracle + where F: Send + Sync + Fn() -> bool +{ + fn to_number(&self, hash: H256) -> Option { + self.client.block_header(BlockID::Hash(hash)).map(|h| HeaderView::new(&h).number()) + } + + fn is_major_syncing(&self) -> bool { + let queue_info = self.client.queue_info(); + + (self.sync_status)() || queue_info.unverified_queue_size + queue_info.verified_queue_size > 3 + } +} + +// helper trait for broadcasting a block to take a snapshot at. +trait Broadcast: Send + Sync { + fn take_at(&self, num: Option); +} + +impl Broadcast for IoChannel { + fn take_at(&self, num: Option) { + let num = match num { + Some(n) => n, + None => return, + }; + + trace!(target: "snapshot_watcher", "broadcast: {}", num); + + if let Err(e) = self.send(ClientIoMessage::TakeSnapshot(num)) { + warn!("Snapshot watcher disconnected from IoService: {}", e); + } + } +} + +/// A `ChainNotify` implementation which will trigger a snapshot event +/// at certain block numbers. +pub struct Watcher { + oracle: Box, + broadcast: Box, + period: u64, + history: u64, +} + +impl Watcher { + /// Create a new `Watcher` which will trigger a snapshot event + /// once every `period` blocks, but only after that block is + /// `history` blocks old. 
+ pub fn new(client: Arc, sync_status: F, channel: IoChannel, period: u64, history: u64) -> Self + where F: 'static + Send + Sync + Fn() -> bool + { + Watcher { + oracle: Box::new(StandardOracle { + client: client, + sync_status: sync_status, + }), + broadcast: Box::new(channel), + period: period, + history: history, + } + } +} + +impl ChainNotify for Watcher { + fn new_blocks( + &self, + imported: Vec, + _: Vec, + _: Vec, + _: Vec, + _: Vec, + _duration: u64) + { + if self.oracle.is_major_syncing() { return } + + trace!(target: "snapshot_watcher", "{} imported", imported.len()); + + let highest = imported.into_iter() + .filter_map(|h| self.oracle.to_number(h)) + .filter(|&num| num >= self.period + self.history) + .map(|num| num - self.history) + .filter(|num| num % self.period == 0) + .fold(0, ::std::cmp::max); + + match highest { + 0 => self.broadcast.take_at(None), + _ => self.broadcast.take_at(Some(highest)), + } + } +} + +#[cfg(test)] +mod tests { + use super::{Broadcast, Oracle, Watcher}; + + use client::ChainNotify; + + use util::{H256, U256}; + + use std::collections::HashMap; + + struct TestOracle(HashMap); + + impl Oracle for TestOracle { + fn to_number(&self, hash: H256) -> Option { + self.0.get(&hash).cloned() + } + + fn is_major_syncing(&self) -> bool { false } + } + + struct TestBroadcast(Option); + impl Broadcast for TestBroadcast { + fn take_at(&self, num: Option) { + if num != self.0 { + panic!("Watcher broadcast wrong number. Expected {:?}, found {:?}", self.0, num); + } + } + } + + // helper harness for tests which expect a notification. 
+ fn harness(numbers: Vec, period: u64, history: u64, expected: Option) { + let hashes: Vec<_> = numbers.clone().into_iter().map(|x| H256::from(U256::from(x))).collect(); + let map = hashes.clone().into_iter().zip(numbers).collect(); + + let watcher = Watcher { + oracle: Box::new(TestOracle(map)), + broadcast: Box::new(TestBroadcast(expected)), + period: period, + history: history, + }; + + watcher.new_blocks( + hashes, + vec![], + vec![], + vec![], + vec![], + 0, + ); + } + + // helper + + #[test] + fn should_not_fire() { + harness(vec![0], 5, 0, None); + } + + #[test] + fn fires_once_for_two() { + harness(vec![14, 15], 10, 5, Some(10)); + } + + #[test] + fn finds_highest() { + harness(vec![15, 25], 10, 5, Some(20)); + } + + #[test] + fn doesnt_fire_before_history() { + harness(vec![10, 11], 10, 5, None); + } +} \ No newline at end of file diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 1b3e8853e..1eae0f3b3 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -245,18 +245,21 @@ impl Spec { } /// Loads spec from json file. - pub fn load(reader: &[u8]) -> Self { - From::from(ethjson::spec::Spec::load(reader).expect("invalid json file")) + pub fn load(reader: R) -> Result where R: Read { + match ethjson::spec::Spec::load(reader) { + Ok(spec) => Ok(spec.into()), + _ => Err("Spec json is invalid".into()), + } } /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus. - pub fn new_test() -> Spec { - Spec::load(include_bytes!("../../res/null_morden.json")) + pub fn new_test() -> Self { + Spec::load(include_bytes!("../../res/null_morden.json") as &[u8]).expect("null_morden.json is invalid") } /// Create a new Spec which is a NullEngine consensus with a premine of address whose secret is sha3(''). 
- pub fn new_null() -> Spec { - Spec::load(include_bytes!("../../res/null.json")) + pub fn new_null() -> Self { + Spec::load(include_bytes!("../../res/null.json") as &[u8]).expect("null.json is invalid") } } @@ -268,6 +271,12 @@ mod tests { use views::*; use super::*; + // https://github.com/ethcore/parity/issues/1840 + #[test] + fn test_load_empty() { + assert!(Spec::load(&vec![] as &[u8]).is_err()); + } + #[test] fn test_chain() { let test_spec = Spec::new_test(); diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 99aae1078..ff4e09dc9 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -28,7 +28,16 @@ use rlp::{Rlp, View}; fn imports_from_empty() { let dir = RandomTempPath::new(); let spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); client.import_verified_blocks(); client.flush_queue(); } @@ -37,7 +46,16 @@ fn imports_from_empty() { fn should_return_registrar() { let dir = RandomTempPath::new(); let spec = ethereum::new_morden(); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); assert_eq!(client.additional_params().get("registrar"), Some(&"8e4e9b13d4b45cb0befc93c3061b1408f67316b2".to_owned())); } @@ -55,7 +73,16 @@ fn returns_state_root_basic() { fn imports_good_block() { let dir = RandomTempPath::new(); let spec = 
get_test_spec(); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); let good_block = get_good_dummy_block(); if let Err(_) = client.import_block(good_block) { panic!("error importing block being good by definition"); @@ -71,8 +98,16 @@ fn imports_good_block() { fn query_none_block() { let dir = RandomTempPath::new(); let spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); let non_existant = client.block_header(BlockID::Number(188)); assert!(non_existant.is_none()); } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index e05c82c55..c1f99f434 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -133,9 +133,17 @@ pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, pub fn generate_dummy_client_with_spec_and_data(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> GuardedTempResult> where F: Fn()->Spec { let dir = RandomTempPath::new(); - let test_spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &test_spec, dir.as_path(), Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &test_spec, + 
dir.as_path(), + Arc::new(Miner::with_spec(&test_spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); let test_engine = &*test_spec.engine; let mut db_result = get_temp_journal_db(); @@ -233,7 +241,17 @@ pub fn push_blocks_to_client(client: &Arc, timestamp_salt: u64, starting pub fn get_test_client_with_blocks(blocks: Vec) -> GuardedTempResult> { let dir = RandomTempPath::new(); let test_spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &test_spec, dir.as_path(), Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &test_spec, + dir.as_path(), + Arc::new(Miner::with_spec(&test_spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); + for block in &blocks { if let Err(_) = client.import_block(block.clone()) { panic!("panic importing block which is well-formed"); diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index afd2cd6a7..202e42988 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -25,18 +25,23 @@ use devtools::*; use miner::Miner; use crossbeam; use io::IoChannel; +use util::kvdb::DatabaseConfig; pub fn run_test_worker(scope: &crossbeam::Scope, stop: Arc, socket_path: &str) { let socket_path = socket_path.to_owned(); scope.spawn(move || { let temp = RandomTempPath::create_dir(); let spec = get_test_spec(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let client = Client::new( ClientConfig::default(), &spec, temp.as_path(), Arc::new(Miner::with_spec(&spec)), - IoChannel::disconnected()).unwrap(); + IoChannel::disconnected(), + &db_config + ).unwrap(); let mut worker = nanoipc::Worker::new(&(client as Arc)); worker.add_reqrep(&socket_path).unwrap(); while !stop.load(Ordering::Relaxed) { diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index e7731d1cc..0537fe056 100644 --- 
a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -31,3 +31,5 @@ pub mod trace_filter; pub mod call_analytics; pub mod transaction_import; pub mod block_import_error; +pub mod restoration_status; +pub mod snapshot_manifest; diff --git a/ethcore/src/types/restoration_status.rs b/ethcore/src/types/restoration_status.rs new file mode 100644 index 000000000..2840d9416 --- /dev/null +++ b/ethcore/src/types/restoration_status.rs @@ -0,0 +1,34 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Restoration status type definition + +/// Statuses for restorations. +#[derive(PartialEq, Eq, Clone, Copy, Debug, Binary)] +pub enum RestorationStatus { + /// No restoration. + Inactive, + /// Ongoing restoration. + Ongoing { + /// Number of state chunks completed. + state_chunks_done: u32, + /// Number of block chunks completed. + block_chunks_done: u32, + }, + /// Failed restoration. + Failed, +} + diff --git a/ethcore/src/types/snapshot_manifest.rs b/ethcore/src/types/snapshot_manifest.rs new file mode 100644 index 000000000..859ec016f --- /dev/null +++ b/ethcore/src/types/snapshot_manifest.rs @@ -0,0 +1,70 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Snapshot manifest type definition + +use util::hash::H256; +use rlp::*; +use util::Bytes; + +/// Manifest data. +#[derive(Debug, Clone, PartialEq, Eq, Binary)] +pub struct ManifestData { + /// List of state chunk hashes. + pub state_hashes: Vec, + /// List of block chunk hashes. + pub block_hashes: Vec, + /// The final, expected state root. + pub state_root: H256, + /// Block number this snapshot was taken at. + pub block_number: u64, + /// Block hash this snapshot was taken at. + pub block_hash: H256, +} + +impl ManifestData { + /// Encode the manifest data to rlp. + pub fn into_rlp(self) -> Bytes { + let mut stream = RlpStream::new_list(5); + stream.append(&self.state_hashes); + stream.append(&self.block_hashes); + stream.append(&self.state_root); + stream.append(&self.block_number); + stream.append(&self.block_hash); + + stream.out() + } + + /// Try to restore manifest data from raw bytes, interpreted as RLP. 
+ pub fn from_rlp(raw: &[u8]) -> Result { + let decoder = UntrustedRlp::new(raw); + + let state_hashes: Vec = try!(decoder.val_at(0)); + let block_hashes: Vec = try!(decoder.val_at(1)); + let state_root: H256 = try!(decoder.val_at(2)); + let block_number: u64 = try!(decoder.val_at(3)); + let block_hash: H256 = try!(decoder.val_at(4)); + + Ok(ManifestData { + state_hashes: state_hashes, + block_hashes: block_hashes, + state_root: state_root, + block_number: block_number, + block_hash: block_hash, + }) + } +} + diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 53c38a6b0..ed9c8ebc7 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -25,7 +25,7 @@ pub use self::canon_verifier::CanonVerifier; pub use self::noop_verifier::NoopVerifier; /// Verifier type. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum VerifierType { /// Verifies block normally. Canon, diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 0dd11b976..ccdf61130 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -19,7 +19,6 @@ use std::{io, fs}; use std::io::{BufReader, BufRead}; use std::time::Duration; use std::thread::sleep; -use std::path::Path; use std::sync::Arc; use rustc_serialize::hex::FromHex; use ethcore_logger::{setup_log, Config as LogConfig}; @@ -125,8 +124,9 @@ fn execute_import(cmd: ImportBlockchain) -> Result { // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); - // prepare client_path + // prepare client and snapshot paths. 
let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); + let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); // execute upgrades try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); @@ -138,8 +138,9 @@ fn execute_import(cmd: ImportBlockchain) -> Result { let service = try!(ClientService::start( client_config, &spec, - Path::new(&client_path), - Path::new(&cmd.dirs.ipc_path()), + &client_path, + &snapshot_path, + &cmd.dirs.ipc_path(), Arc::new(Miner::with_spec(&spec)), ).map_err(|e| format!("Client service error: {:?}", e))); @@ -237,8 +238,9 @@ fn execute_export(cmd: ExportBlockchain) -> Result { // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); - // prepare client_path + // prepare client and snapshot paths. let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); + let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); // execute upgrades try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); @@ -249,8 +251,9 @@ fn execute_export(cmd: ExportBlockchain) -> Result { let service = try!(ClientService::start( client_config, &spec, - Path::new(&client_path), - Path::new(&cmd.dirs.ipc_path()), + &client_path, + &snapshot_path, + &cmd.dirs.ipc_path(), Arc::new(Miner::with_spec(&spec)), ).map_err(|e| format!("Client service error: {:?}", e))); diff --git a/parity/cli.rs b/parity/cli.rs index 8f33489dc..bb46bda13 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -243,6 +243,8 @@ Snapshot Options: index, hash, or 'latest'. Note that taking snapshots at non-recent blocks will only work with --pruning archive [default: latest] + --no-periodic-snapshot Disable automated snapshots which usually occur once + every 10000 blocks. 
Virtual Machine Options: --jitvm Enable the JIT VM. @@ -382,6 +384,7 @@ pub struct Args { pub flag_from: String, pub flag_to: String, pub flag_at: String, + pub flag_no_periodic_snapshot: bool, pub flag_format: Option, pub flag_jitvm: bool, pub flag_log_file: Option, diff --git a/parity/configuration.rs b/parity/configuration.rs index f2fd34853..51d637580 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -226,6 +226,7 @@ impl Configuration { ui: self.args.cmd_ui, name: self.args.flag_identity, custom_bootnodes: self.args.flag_bootnodes.is_some(), + no_periodic_snapshot: self.args.flag_no_periodic_snapshot, }; Cmd::Run(run_cmd) }; @@ -802,6 +803,7 @@ mod tests { ui: false, name: "".into(), custom_bootnodes: false, + no_periodic_snapshot: false, })); } diff --git a/parity/dir.rs b/parity/dir.rs index f1f230163..d31e81e2c 100644 --- a/parity/dir.rs +++ b/parity/dir.rs @@ -52,10 +52,16 @@ impl Directories { Ok(()) } - /// Get the root path for database - pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { + /// Get the chain's root path. + pub fn chain_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { let mut dir = Path::new(&self.db).to_path_buf(); dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default())); + dir + } + + /// Get the root path for database + pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { + let mut dir = self.chain_path(genesis_hash, fork_name); dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); dir } @@ -67,6 +73,13 @@ impl Directories { dir } + /// Get the path for the snapshot directory given the genesis hash and fork name. 
+ pub fn snapshot_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { + let mut dir = self.chain_path(genesis_hash, fork_name); + dir.push("snapshot"); + dir + } + /// Get the ipc sockets path pub fn ipc_path(&self) -> PathBuf { let mut dir = Path::new(&self.db).to_path_buf(); diff --git a/parity/main.rs b/parity/main.rs index 86844baa9..9c2ae7942 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -99,9 +99,11 @@ mod modules; mod account; mod blockchain; mod presale; -mod run; -mod sync; mod snapshot; +mod run; +#[cfg(feature="ipc")] +mod sync; +#[cfg(feature="ipc")] mod boot; #[cfg(feature="stratum")] @@ -158,10 +160,24 @@ mod stratum_optional { } } -fn main() { +#[cfg(not(feature="ipc"))] +fn sync_main() -> bool { + false +} + +#[cfg(feature="ipc")] +fn sync_main() -> bool { // just redirect to the sync::main() if std::env::args().nth(1).map_or(false, |arg| arg == "sync") { sync::main(); + true + } else { + false + } +} + +fn main() { + if sync_main() { return; } diff --git a/parity/modules.rs b/parity/modules.rs index 5edbca702..73de6ca29 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -18,6 +18,7 @@ use std::sync::Arc; use ethcore::client::BlockChainClient; use hypervisor::Hypervisor; use ethsync::{SyncConfig, NetworkConfiguration, NetworkError}; +use ethcore::snapshot::SnapshotService; #[cfg(not(feature="ipc"))] use self::no_ipc_deps::*; #[cfg(feature="ipc")] @@ -25,10 +26,12 @@ use self::ipc_deps::*; use ethcore_logger::Config as LogConfig; use std::path::Path; +#[cfg(feature="ipc")] pub mod service_urls { use std::path::PathBuf; pub const CLIENT: &'static str = "parity-chain.ipc"; + pub const SNAPSHOT: &'static str = "parity-snapshot.ipc"; pub const SYNC: &'static str = "parity-sync.ipc"; pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc"; pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc"; @@ -119,6 +122,7 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, _client: Arc, + 
_snapshot_service: Arc, log_settings: &LogConfig, ) -> Result @@ -148,10 +152,11 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, client: Arc, + snapshot_service: Arc, _log_settings: &LogConfig, ) -> Result { - let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg)); + let eth_sync = try!(EthSync::new(sync_cfg, client, snapshot_service, net_cfg)); Ok((eth_sync.clone() as Arc, eth_sync.clone() as Arc, eth_sync.clone() as Arc)) } diff --git a/parity/params.rs b/parity/params.rs index 54a680414..c67520aa1 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -17,7 +17,7 @@ use std::str::FromStr; use std::fs; use std::time::Duration; -use util::{contents, H256, Address, U256, version_data}; +use util::{H256, Address, U256, version_data}; use util::journaldb::Algorithm; use ethcore::spec::Spec; use ethcore::ethereum; @@ -61,7 +61,10 @@ impl SpecType { SpecType::Testnet => Ok(ethereum::new_morden()), SpecType::Olympic => Ok(ethereum::new_olympic()), SpecType::Classic => Ok(ethereum::new_classic()), - SpecType::Custom(ref file) => Ok(Spec::load(&try!(contents(file).map_err(|_| "Could not load specification file.")))) + SpecType::Custom(ref filename) => { + let file = try!(fs::File::open(filename).map_err(|_| "Could not load specification file.")); + Spec::load(file) + } } } } diff --git a/parity/run.rs b/parity/run.rs index 8a68fe1af..720e6f1bf 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -15,7 +15,6 @@ // along with Parity. If not, see . 
use std::sync::{Arc, Mutex, Condvar}; -use std::path::Path; use std::io::ErrorKind; use ctrlc::CtrlC; use fdlimit::raise_fd_limit; @@ -28,7 +27,8 @@ use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNoti use ethcore::service::ClientService; use ethcore::account_provider::AccountProvider; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; -use ethsync::SyncConfig; +use ethcore::snapshot; +use ethsync::{SyncConfig, SyncProvider}; use informant::Informant; use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; @@ -46,6 +46,12 @@ use rpc_apis; use rpc; use url; +// how often to take periodic snapshots. +const SNAPSHOT_PERIOD: u64 = 10000; + +// how many blocks to wait before starting a periodic snapshot. +const SNAPSHOT_HISTORY: u64 = 500; + #[derive(Debug, PartialEq)] pub struct RunCmd { pub cache_config: CacheConfig, @@ -77,6 +83,7 @@ pub struct RunCmd { pub ui: bool, pub name: String, pub custom_bootnodes: bool, + pub no_periodic_snapshot: bool, } pub fn execute(cmd: RunCmd) -> Result<(), String> { @@ -102,8 +109,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref()); - // prepare client_path + // prepare client and snapshot paths. let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm); + let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, fork_name.as_ref()); // execute upgrades try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); @@ -163,14 +171,15 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { } // create supervisor - let mut hypervisor = modules::hypervisor(Path::new(&cmd.dirs.ipc_path())); + let mut hypervisor = modules::hypervisor(&cmd.dirs.ipc_path()); // create client service. 
let service = try!(ClientService::start( client_config, &spec, - Path::new(&client_path), - Path::new(&cmd.dirs.ipc_path()), + &client_path, + &snapshot_path, + &cmd.dirs.ipc_path(), miner.clone(), ).map_err(|e| format!("Client service error: {:?}", e))); @@ -179,13 +188,14 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // take handle to client let client = service.client(); + let snapshot_service = service.snapshot_service(); // create external miner let external_miner = Arc::new(ExternalMiner::default()); // create sync object let (sync_provider, manage_network, chain_notify) = try!(modules::sync( - &mut hypervisor, sync_config, net_conf.into(), client.clone(), &cmd.logger_config, + &mut hypervisor, sync_config, net_conf.into(), client.clone(), snapshot_service, &cmd.logger_config, ).map_err(|e| format!("Sync error: {}", e))); service.add_notify(chain_notify.clone()); @@ -250,6 +260,24 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { }); service.register_io_handler(io_handler).expect("Error registering IO handler"); + // the watcher must be kept alive. + let _watcher = match cmd.no_periodic_snapshot { + true => None, + false => { + let sync = sync_provider.clone(); + let watcher = Arc::new(snapshot::Watcher::new( + service.client(), + move || sync.status().is_major_syncing(), + service.io().channel(), + SNAPSHOT_PERIOD, + SNAPSHOT_HISTORY, + )); + + service.add_notify(watcher.clone()); + Some(watcher) + }, + }; + // start ui if cmd.ui { if !cmd.dapps_conf.enabled { diff --git a/parity/snapshot.rs b/parity/snapshot.rs index ecc463a2e..8c0bdd8fc 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -82,8 +82,9 @@ impl SnapshotCommand { // select pruning algorithm let algorithm = self.pruning.to_algorithm(&self.dirs, genesis_hash, spec.fork_name.as_ref()); - // prepare client_path + // prepare client and snapshot paths. 
let client_path = self.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); + let snapshot_path = self.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); // execute upgrades try!(execute_upgrades(&self.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, self.compaction.compaction_profile())); @@ -94,8 +95,9 @@ impl SnapshotCommand { let service = try!(ClientService::start( client_config, &spec, - Path::new(&client_path), - Path::new(&self.dirs.ipc_path()), + &client_path, + &snapshot_path, + &self.dirs.ipc_path(), Arc::new(Miner::with_spec(&spec)) ).map_err(|e| format!("Client service error: {:?}", e))); @@ -129,10 +131,9 @@ impl SnapshotCommand { let informant_handle = snapshot.clone(); ::std::thread::spawn(move || { - while let RestorationStatus::Ongoing = informant_handle.status() { - let (state_chunks, block_chunks) = informant_handle.chunks_done(); + while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() { info!("Processed {}/{} state chunks and {}/{} block chunks.", - state_chunks, num_state, block_chunks, num_blocks); + state_chunks_done, num_state, block_chunks_done, num_blocks); ::std::thread::sleep(Duration::from_secs(5)); } @@ -161,7 +162,7 @@ impl SnapshotCommand { } match snapshot.status() { - RestorationStatus::Ongoing => Err("Snapshot file is incomplete and missing chunks.".into()), + RestorationStatus::Ongoing { .. 
} => Err("Snapshot file is incomplete and missing chunks.".into()), RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), RestorationStatus::Inactive => { info!("Restoration complete."); diff --git a/parity/sync.rs b/parity/sync.rs index 27e9d5a6a..85f771546 100644 --- a/parity/sync.rs +++ b/parity/sync.rs @@ -20,6 +20,7 @@ use std::sync::Arc; use std::sync::atomic::AtomicBool; use hypervisor::{SYNC_MODULE_ID, HYPERVISOR_IPC_URL, ControlService}; use ethcore::client::{RemoteClient, ChainNotify}; +use ethcore::snapshot::{RemoteSnapshotService}; use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration}; use modules::service_urls; use boot; @@ -45,8 +46,9 @@ pub fn main() { .unwrap_or_else(|e| panic!("Fatal: error reading boot arguments ({:?})", e)); let remote_client = dependency!(RemoteClient, &service_urls::with_base(&service_config.io_path, service_urls::CLIENT)); + let remote_snapshot = dependency!(RemoteSnapshotService, &service_urls::with_base(&service_config.io_path, service_urls::SNAPSHOT)); - let sync = EthSync::new(service_config.sync, remote_client.service().clone(), service_config.net).unwrap(); + let sync = EthSync::new(service_config.sync, remote_client.service().clone(), remote_snapshot.service().clone(), service_config.net).unwrap(); let _ = boot::main_thread(); let service_stop = Arc::new(AtomicBool::new(false)); diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7807c01eb..9487f020d 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -254,7 +254,8 @@ impl Eth for EthClient where let status = take_weak!(self.sync).status(); let res = match status.state { SyncState::Idle => SyncStatus::None, - SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead => { + SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead + | SyncState::SnapshotManifest | SyncState::SnapshotData | SyncState::SnapshotWaiting => { let current_block = 
U256::from(take_weak!(self.client).chain_info().best_block_number); let highest_block = U256::from(status.highest_block_number.unwrap_or(status.start_block_number)); diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index b7ad5b943..448fa4734 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -108,7 +108,16 @@ impl EthTester { let dir = RandomTempPath::new(); let account_provider = account_provider(); let miner_service = miner_service(&spec, account_provider.clone()); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), miner_service.clone(), IoChannel::disconnected()).unwrap(); + + let db_config = ::util::kvdb::DatabaseConfig::with_columns(::ethcore::db::NUM_COLUMNS); + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + miner_service.clone(), + IoChannel::disconnected(), + &db_config + ).unwrap(); let sync_provider = sync_provider(); let external_miner = Arc::new(ExternalMiner::default()); @@ -286,7 +295,7 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{ #[test] fn eth_transaction_count() { let secret = "8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2".into(); - let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC)); + let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC).expect("invalid chain spec")); let address = tester.accounts.insert_account(secret, "").unwrap(); tester.accounts.unlock_account_permanently(address, "".into()).unwrap(); @@ -412,7 +421,7 @@ fn verify_transaction_counts(name: String, chain: BlockChain) { #[test] fn starting_nonce_test() { - let tester = EthTester::from_spec(Spec::load(POSITIVE_NONCE_SPEC)); + let tester = EthTester::from_spec(Spec::load(POSITIVE_NONCE_SPEC).expect("invalid chain spec")); let address = Address::from(10); let sample = tester.handler.handle_request_sync(&(r#" diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index 94f7b4893..b83aff758 
100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -49,6 +49,8 @@ impl TestSyncProvider { num_peers: config.num_peers, num_active_peers: 0, mem_used: 0, + num_snapshot_chunks: 0, + snapshot_chunks_done: 0, }), } } diff --git a/sync/src/api.rs b/sync/src/api.rs index 5fcc7f9ca..721b29ef8 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -20,6 +20,7 @@ use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, use util::{U256, H256}; use io::{TimerToken}; use ethcore::client::{BlockChainClient, ChainNotify}; +use ethcore::snapshot::SnapshotService; use ethcore::header::BlockNumber; use sync_io::NetSyncIo; use chain::{ChainSync, SyncStatus}; @@ -76,14 +77,14 @@ pub struct EthSync { impl EthSync { /// Creates and register protocol with the network service - pub fn new(config: SyncConfig, chain: Arc, network_config: NetworkConfiguration) -> Result, NetworkError> { + pub fn new(config: SyncConfig, chain: Arc, snapshot_service: Arc, network_config: NetworkConfiguration) -> Result, NetworkError> { let inf_sync = InfinitySync::new(&config, chain.clone()); let chain_sync = ChainSync::new(config, &*chain); let service = try!(NetworkService::new(try!(network_config.into_basic()))); let sync = Arc::new(EthSync{ network: service, - eth_handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain.clone() }), - inf_handler: Arc::new(InfProtocolHandler { sync: RwLock::new(inf_sync), chain: chain }), + eth_handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain.clone(), snapshot_service: snapshot_service.clone() }), + inf_handler: Arc::new(InfProtocolHandler { sync: RwLock::new(inf_sync), chain: chain, snapshot_service: snapshot_service }), }); Ok(sync) @@ -102,6 +103,8 @@ impl SyncProvider for EthSync { struct SyncProtocolHandler { /// Shared blockchain client. chain: Arc, + /// Shared snapshot service. 
+ snapshot_service: Arc, /// Sync strategy sync: RwLock, } @@ -112,27 +115,29 @@ impl NetworkProtocolHandler for SyncProtocolHandler { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain), *peer, packet_id, data); + ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer, packet_id, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain), *peer); + self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer); } fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain), *peer); + self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer); } fn timeout(&self, io: &NetworkContext, _timer: TimerToken) { - self.sync.write().maintain_peers(&mut NetSyncIo::new(io, &*self.chain)); - self.sync.write().maintain_sync(&mut NetSyncIo::new(io, &*self.chain)); - self.sync.write().propagate_new_transactions(&mut NetSyncIo::new(io, &*self.chain)); + self.sync.write().maintain_peers(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service)); + self.sync.write().maintain_sync(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service)); + self.sync.write().propagate_new_transactions(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service)); } } struct InfProtocolHandler { /// Shared blockchain client. chain: Arc, + /// Shared snapshot service. 
+ snapshot_service: Arc, /// Sync strategy sync: RwLock, } @@ -142,15 +147,15 @@ impl NetworkProtocolHandler for InfProtocolHandler { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - InfinitySync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain), *peer, packet_id, data); + InfinitySync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer, packet_id, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain), *peer); + self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer); } fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain), *peer); + self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer); } fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) { @@ -167,7 +172,7 @@ impl ChainNotify for EthSync { _duration: u64) { self.network.with_context(ETH_PROTOCOL, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain); + let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service); self.eth_handler.sync.write().chain_new_blocks( &mut sync_io, &imported, @@ -180,7 +185,7 @@ impl ChainNotify for EthSync { fn start(&self) { self.network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e)); - self.network.register_protocol(self.eth_handler.clone(), ETH_PROTOCOL, &[62u8, 63u8]) + self.network.register_protocol(self.eth_handler.clone(), ETH_PROTOCOL, &[62u8, 63u8, 64u8]) .unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e)); self.network.register_protocol(self.inf_handler.clone(), INF_PROTOCOL, &[1u8]) .unwrap_or_else(|e| warn!("Error registering infinity protocol: {:?}", e)); @@ -192,7 +197,7 @@ impl 
ChainNotify for EthSync { fn broadcast(&self, message: Vec) { self.network.with_context(ETH_PROTOCOL, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain); + let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service); self.inf_handler.sync.write().propagate_packet(&mut sync_io, message.clone()); }); } @@ -245,7 +250,7 @@ impl ManageNetwork for EthSync { fn stop_network(&self) { self.network.with_context(ETH_PROTOCOL, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain); + let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service); self.eth_handler.sync.write().abort(&mut sync_io); }); self.stop(); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index e5e5de5dc..ea5e593f3 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -96,17 +96,19 @@ use ethcore::header::{BlockNumber, Header as BlockHeader}; use ethcore::client::{BlockChainClient, BlockStatus, BlockID, BlockChainInfo, BlockImportError}; use ethcore::error::*; use ethcore::block::Block; +use ethcore::snapshot::{ManifestData, RestorationStatus}; use sync_io::SyncIo; use time; use super::SyncConfig; use blocks::BlockCollection; +use snapshot::{Snapshot, ChunkType}; use rand::{thread_rng, Rng}; known_heap_size!(0, PeerInfo); type PacketDecodeError = DecoderError; -const PROTOCOL_VERSION: u8 = 63u8; +const PROTOCOL_VERSION: u8 = 64u8; const MAX_BODIES_TO_SEND: usize = 256; const MAX_HEADERS_TO_SEND: usize = 512; const MAX_NODE_DATA_TO_SEND: usize = 1024; @@ -136,14 +138,26 @@ const GET_NODE_DATA_PACKET: u8 = 0x0d; const NODE_DATA_PACKET: u8 = 0x0e; const GET_RECEIPTS_PACKET: u8 = 0x0f; const RECEIPTS_PACKET: u8 = 0x10; +const GET_SNAPSHOT_MANIFEST_PACKET: u8 = 0x11; +const SNAPSHOT_MANIFEST_PACKET: u8 = 0x12; +const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13; +const SNAPSHOT_DATA_PACKET: u8 = 0x14; const HEADERS_TIMEOUT_SEC: f64 = 15f64; const BODIES_TIMEOUT_SEC: 
f64 = 5f64; const FORK_HEADER_TIMEOUT_SEC: f64 = 3f64; +const SNAPSHOT_MANIFEST_TIMEOUT_SEC: f64 = 3f64; +const SNAPSHOT_DATA_TIMEOUT_SEC: f64 = 10f64; #[derive(Copy, Clone, Eq, PartialEq, Debug)] /// Sync state pub enum SyncState { + /// Waiting for pv64 peers to start snapshot syncing + SnapshotManifest, + /// Downloading snapshot data + SnapshotData, + /// Waiting for snapshot restoration to complete + SnapshotWaiting, /// Downloading subchain heads ChainHead, /// Initial chain sync complete. Waiting for new packets @@ -177,10 +191,14 @@ pub struct SyncStatus { pub blocks_received: BlockNumber, /// Total number of connected peers pub num_peers: usize, - /// Total number of active peers + /// Total number of active peers. pub num_active_peers: usize, - /// Heap memory used in bytes + /// Heap memory used in bytes. pub mem_used: usize, + /// Snapshot chunks + pub num_snapshot_chunks: usize, + /// Snapshot chunks downloaded + pub snapshot_chunks_done: usize, } impl SyncStatus { @@ -207,6 +225,8 @@ enum PeerAsking { BlockHeaders, BlockBodies, Heads, + SnapshotManifest, + SnapshotData, } #[derive(Clone, Eq, PartialEq)] @@ -240,6 +260,8 @@ struct PeerInfo { asking_blocks: Vec, /// Holds requested header hash if currently requesting block header by hash asking_hash: Option, + /// Holds requested snapshot chunk hash if any. + asking_snapshot_data: Option, /// Request timestamp ask_time: f64, /// Holds a set of transactions recently sent to this peer to avoid spamming. @@ -248,6 +270,10 @@ struct PeerInfo { expired: bool, /// Peer fork confirmation status confirmation: ForkConfirmation, + /// Best snapshot hash + snapshot_hash: Option, + /// Best snapshot block number + snapshot_number: Option, } impl PeerInfo { @@ -293,6 +319,8 @@ pub struct ChainSync { network_id: U256, /// Optional fork block to check fork_block: Option<(BlockNumber, H256)>, + /// Snapshot downloader. 
+ snapshot: Snapshot, } type RlpResponseResult = Result, PacketDecodeError>; @@ -301,8 +329,8 @@ impl ChainSync { /// Create a new instance of syncing strategy. pub fn new(config: SyncConfig, chain: &BlockChainClient) -> ChainSync { let chain = chain.chain_info(); - let mut sync = ChainSync { - state: SyncState::ChainHead, + ChainSync { + state: SyncState::Idle, starting_block: chain.best_block_number, highest_block: None, last_imported_block: chain.best_block_number, @@ -317,16 +345,15 @@ impl ChainSync { _max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, fork_block: config.fork_block, - }; - sync.reset(); - sync + snapshot: Snapshot::new(), + } } /// @returns Synchonization status pub fn status(&self) -> SyncStatus { SyncStatus { state: self.state.clone(), - protocol_version: 63, + protocol_version: if self.state == SyncState::SnapshotData { 64 } else { 63 }, network_id: self.network_id, start_block_number: self.starting_block, last_imported_block_number: Some(self.last_imported_block), @@ -335,6 +362,8 @@ impl ChainSync { blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 }, num_peers: self.peers.values().filter(|p| p.is_allowed()).count(), num_active_peers: self.peers.values().filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing).count(), + num_snapshot_chunks: self.snapshot.total_chunks(), + snapshot_chunks_done: self.snapshot.done_chunks(), mem_used: self.blocks.heap_size() + self.peers.heap_size_of_children() @@ -350,8 +379,13 @@ impl ChainSync { #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` /// Reset sync. 
Clear all downloaded data but keep the queue - fn reset(&mut self) { + fn reset(&mut self, io: &mut SyncIo) { self.blocks.clear(); + self.snapshot.clear(); + if self.state == SyncState::SnapshotData { + debug!(target:"sync", "Aborting snapshot restore"); + io.snapshot_service().abort_restore(); + } for (_, ref mut p) in &mut self.peers { p.asking_blocks.clear(); p.asking_hash = None; @@ -368,7 +402,7 @@ impl ChainSync { /// Restart sync pub fn restart(&mut self, io: &mut SyncIo) { trace!(target: "sync", "Restarting"); - self.reset(); + self.reset(io); self.start_sync_round(io); self.continue_sync(io); } @@ -380,13 +414,19 @@ impl ChainSync { if self.active_peers.is_empty() { trace!(target: "sync", "No more active peers"); if self.state == SyncState::ChainHead { - self.complete_sync(); + self.complete_sync(io); } else { self.restart(io); } } } + fn start_snapshot_sync(&mut self, io: &mut SyncIo, peer_id: PeerId) { + self.snapshot.clear(); + self.request_snapshot_manifest(io, peer_id); + self.state = SyncState::SnapshotManifest; + } + /// Restart sync after bad block has been detected. 
May end up re-downloading up to QUEUE_SIZE blocks fn restart_on_bad_block(&mut self, io: &mut SyncIo) { // Do not assume that the block queue/chain still has our last_imported_block @@ -398,8 +438,9 @@ impl ChainSync { /// Called by peer to report status fn on_peer_status(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + let protocol_version: u32 = try!(r.val_at(0)); let peer = PeerInfo { - protocol_version: try!(r.val_at(0)), + protocol_version: protocol_version, network_id: try!(r.val_at(1)), difficulty: Some(try!(r.val_at(2))), latest_hash: try!(r.val_at(3)), @@ -412,6 +453,9 @@ impl ChainSync { last_sent_transactions: HashSet::new(), expired: false, confirmation: if self.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed }, + asking_snapshot_data: None, + snapshot_hash: if protocol_version == 64 { Some(try!(r.val_at(5))) } else { None }, + snapshot_number: if protocol_version == 64 { Some(try!(r.val_at(6))) } else { None }, }; trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{})", peer_id, peer.protocol_version, peer.network_id, peer.difficulty, peer.latest_hash, peer.genesis); @@ -749,6 +793,96 @@ impl ChainSync { Ok(()) } + /// Called when snapshot manifest is downloaded from a peer. 
+ fn on_snapshot_manifest(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id); + return Ok(()); + } + self.clear_peer_download(peer_id); + if !self.reset_peer_asking(peer_id, PeerAsking::SnapshotManifest) || self.state != SyncState::SnapshotManifest { + trace!(target: "sync", "{}: Ignored unexpected manifest", peer_id); + self.continue_sync(io); + return Ok(()); + } + + let manifest_rlp = try!(r.at(0)); + let manifest = match ManifestData::from_rlp(&manifest_rlp.as_raw()) { + Err(e) => { + trace!(target: "sync", "{}: Ignored bad manifest: {:?}", peer_id, e); + io.disconnect_peer(peer_id); + self.continue_sync(io); + return Ok(()); + } + Ok(manifest) => manifest, + }; + self.snapshot.reset_to(&manifest, &manifest_rlp.as_raw().sha3()); + io.snapshot_service().begin_restore(manifest); + self.state = SyncState::SnapshotData; + + // give a task to the same peer first. + self.sync_peer(io, peer_id, false); + // give tasks to other peers + self.continue_sync(io); + Ok(()) + } + + /// Called when snapshot data is downloaded from a peer. 
+ fn on_snapshot_data(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id); + return Ok(()); + } + self.clear_peer_download(peer_id); + if !self.reset_peer_asking(peer_id, PeerAsking::SnapshotData) || self.state != SyncState::SnapshotData { + trace!(target: "sync", "{}: Ignored unexpected snapshot data", peer_id); + self.continue_sync(io); + return Ok(()); + } + + // check service status + match io.snapshot_service().status() { + RestorationStatus::Inactive | RestorationStatus::Failed => { + trace!(target: "sync", "{}: Snapshot restoration aborted", peer_id); + self.state = SyncState::Idle; + self.snapshot.clear(); + self.continue_sync(io); + return Ok(()); + }, + RestorationStatus::Ongoing { .. } => { + trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); + }, + } + + let snapshot_data: Bytes = try!(r.val_at(0)); + match self.snapshot.validate_chunk(&snapshot_data) { + Ok(ChunkType::Block(hash)) => { + trace!(target: "sync", "{}: Processing block chunk", peer_id); + io.snapshot_service().restore_block_chunk(hash, snapshot_data); + } + Ok(ChunkType::State(hash)) => { + trace!(target: "sync", "{}: Processing state chunk", peer_id); + io.snapshot_service().restore_state_chunk(hash, snapshot_data); + } + Err(()) => { + trace!(target: "sync", "{}: Got bad snapshot chunk", peer_id); + io.disconnect_peer(peer_id); + self.continue_sync(io); + return Ok(()); + } + } + + if self.snapshot.is_complete() { + // wait for snapshot restoration process to complete + self.state = SyncState::SnapshotWaiting; + } + // give a task to the same peer first. 
+ self.sync_peer(io, peer_id, false); + // give tasks to other peers + self.continue_sync(io); + Ok(()) + } + /// Called by peer when it is disconnecting pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Disconnecting {}: {}", peer, io.peer_info(peer)); @@ -764,7 +898,7 @@ impl ChainSync { /// Called when a new peer is connected pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}: {}", peer, io.peer_info(peer)); - if let Err(e) = self.send_status(io) { + if let Err(e) = self.send_status(io, peer) { debug!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer); } @@ -772,24 +906,27 @@ impl ChainSync { /// Resume downloading fn continue_sync(&mut self, io: &mut SyncIo) { - let mut peers: Vec<(PeerId, U256)> = self.peers.iter().filter_map(|(k, p)| - if p.can_sync() { Some((*k, p.difficulty.unwrap_or_else(U256::zero))) } else { None }).collect(); + let mut peers: Vec<(PeerId, U256, u32)> = self.peers.iter().filter_map(|(k, p)| + if p.can_sync() { Some((*k, p.difficulty.unwrap_or_else(U256::zero), p.protocol_version)) } else { None }).collect(); thread_rng().shuffle(&mut peers); //TODO: sort by rating + // prefer peers with higher protocol version (descending sort so pv64 peers are tried first) + peers.sort_by(|&(_, _, ref v1), &(_, _, ref v2)| v2.cmp(v1)); trace!(target: "sync", "Syncing with {}/{} peers", self.active_peers.len(), peers.len()); - for (p, _) in peers { + for (p, _, _) in peers { if self.active_peers.contains(&p) { self.sync_peer(io, p, false); } } - if self.state != SyncState::Waiting && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.can_sync()) { - self.complete_sync(); + if self.state != SyncState::Waiting && self.state != SyncState::SnapshotWaiting + && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.can_sync()) { + self.complete_sync(io); } } /// Called after all blocks have been downloaded - fn complete_sync(&mut self) { + fn
complete_sync(&mut self, io: &mut SyncIo) { trace!(target: "sync", "Sync complete"); - self.reset(); + self.reset(io); self.state = SyncState::Idle; } @@ -805,7 +942,7 @@ impl ChainSync { trace!(target: "sync", "Skipping deactivated peer"); return; } - let (peer_latest, peer_difficulty) = { + let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { let peer = self.peers.get_mut(&peer_id).unwrap(); if peer.asking != PeerAsking::Nothing || !peer.can_sync() { return; @@ -814,7 +951,11 @@ impl ChainSync { trace!(target: "sync", "Waiting for the block queue"); return; } - (peer.latest_hash.clone(), peer.difficulty.clone()) + if self.state == SyncState::SnapshotWaiting { + trace!(target: "sync", "Waiting for the snapshot restoration"); + return; + } + (peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned(), peer.snapshot_hash.as_ref().cloned()) }; let chain_info = io.chain().chain_info(); let td = chain_info.pending_total_difficulty; @@ -823,13 +964,18 @@ impl ChainSync { if force || self.state == SyncState::NewBlocks || peer_difficulty.map_or(true, |pd| pd > syncing_difficulty) { match self.state { SyncState::Idle => { - if self.last_imported_block < chain_info.best_block_number { - self.last_imported_block = chain_info.best_block_number; - self.last_imported_hash = chain_info.best_block_hash; + // check if we can start snapshot sync with this peer + if peer_snapshot_number.unwrap_or(0) > 0 && chain_info.best_block_number == 0 { + self.start_snapshot_sync(io, peer_id); + } else { + if self.last_imported_block < chain_info.best_block_number { + self.last_imported_block = chain_info.best_block_number; + self.last_imported_hash = chain_info.best_block_hash; + } + trace!(target: "sync", "Starting sync with {}", peer_id); + self.start_sync_round(io); + self.sync_peer(io, peer_id, force); } - trace!(target: "sync", "Starting sync with {}", peer_id); - self.start_sync_round(io); - self.sync_peer(io, peer_id, force); }, 
SyncState::ChainHead => { // Request subchain headers @@ -843,8 +989,14 @@ impl ChainSync { if io.chain().block_status(BlockID::Hash(peer_latest)) == BlockStatus::Unknown { self.request_blocks(io, peer_id, false); } - } - SyncState::Waiting => () + }, + SyncState::SnapshotData => { + if peer_snapshot_hash.is_some() && peer_snapshot_hash == self.snapshot.snapshot_hash() { + self.request_snapshot_data(io, peer_id); + } + }, + SyncState::SnapshotManifest => (), //already downloading from other peer + SyncState::Waiting | SyncState::SnapshotWaiting => () } } } @@ -903,6 +1055,16 @@ impl ChainSync { } } + /// Find a snapshot chunk to download and request it from a peer. + fn request_snapshot_data(&mut self, io: &mut SyncIo, peer_id: PeerId) { + self.clear_peer_download(peer_id); + // find chunk data to download + if let Some(hash) = self.snapshot.needed_chunk() { + self.peers.get_mut(&peer_id).unwrap().asking_snapshot_data = Some(hash.clone()); + self.request_snapshot_chunk(io, peer_id, &hash); + } + } + /// Clear all blocks/headers marked as being downloaded by a peer. fn clear_peer_download(&mut self, peer_id: PeerId) { let peer = self.peers.get_mut(&peer_id).unwrap(); @@ -917,9 +1079,15 @@ impl ChainSync { self.blocks.clear_body_download(b); } }, + PeerAsking::SnapshotData => { + if let Some(hash) = peer.asking_snapshot_data { + self.snapshot.clear_chunk_download(&hash); + } + }, _ => (), } peer.asking_blocks.clear(); + peer.asking_snapshot_data = None; } fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) { @@ -1016,6 +1184,22 @@ impl ChainSync { rlp.append(&if reverse {1u32} else {0u32}); self.send_request(sync, peer_id, asking, GET_BLOCK_HEADERS_PACKET, rlp.out()); } + + /// Request snapshot manifest from a peer.
+ fn request_snapshot_manifest(&mut self, sync: &mut SyncIo, peer_id: PeerId) { + trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id); + let rlp = RlpStream::new_list(0); + self.send_request(sync, peer_id, PeerAsking::SnapshotManifest, GET_SNAPSHOT_MANIFEST_PACKET, rlp.out()); + } + + /// Request snapshot chunk from a peer. + fn request_snapshot_chunk(&mut self, sync: &mut SyncIo, peer_id: PeerId, chunk: &H256) { + trace!(target: "sync", "{} <- GetSnapshotData {:?}", peer_id, chunk); + let mut rlp = RlpStream::new_list(1); + rlp.append(chunk); + self.send_request(sync, peer_id, PeerAsking::SnapshotData, GET_SNAPSHOT_DATA_PACKET, rlp.out()); + } + /// Request block bodies from a peer fn request_bodies(&mut self, sync: &mut SyncIo, peer_id: PeerId, hashes: Vec) { let mut rlp = RlpStream::new_list(hashes.len()); @@ -1086,14 +1270,22 @@ impl ChainSync { } /// Send Status message - fn send_status(&mut self, io: &mut SyncIo) -> Result<(), NetworkError> { - let mut packet = RlpStream::new_list(5); + fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), NetworkError> { + let pv64 = io.eth_protocol_version(peer) >= 64; + let mut packet = RlpStream::new_list(if pv64 { 7 } else { 5 }); let chain = io.chain().chain_info(); packet.append(&(PROTOCOL_VERSION as u32)); packet.append(&self.network_id); packet.append(&chain.total_difficulty); packet.append(&chain.best_block_hash); packet.append(&chain.genesis_hash); + if pv64 { + let manifest = io.snapshot_service().manifest(); + let block_number = manifest.as_ref().map_or(0, |m| m.block_number); + let manifest_hash = manifest.map_or(H256::new(), |m| m.into_rlp().sha3()); + packet.append(&manifest_hash); + packet.append(&block_number); + } io.respond(STATUS_PACKET, packet.out()) } @@ -1230,6 +1422,48 @@ impl ChainSync { Ok(Some((RECEIPTS_PACKET, rlp_result))) } + /// Respond to GetSnapshotManifest request + fn return_snapshot_manifest(io: &SyncIo, r: &UntrustedRlp, peer_id: PeerId) -> RlpResponseResult { + 
let count = r.item_count(); + trace!(target: "sync", "{} -> GetSnapshotManifest", peer_id); + if count != 0 { + debug!(target: "sync", "Invalid GetSnapshotManifest request, ignoring."); + return Ok(None); + } + let rlp = match io.snapshot_service().manifest() { + Some(manifest) => { + trace!(target: "sync", "{} <- SnapshotManifest", peer_id); + let mut rlp = RlpStream::new_list(1); + rlp.append_raw(&manifest.into_rlp(), 1); + rlp + }, + None => { + trace!(target: "sync", "{}: No manifest to return", peer_id); + let rlp = RlpStream::new_list(0); + rlp + } + }; + Ok(Some((SNAPSHOT_MANIFEST_PACKET, rlp))) + } + + /// Respond to GetSnapshotData request + fn return_snapshot_data(io: &SyncIo, r: &UntrustedRlp, peer_id: PeerId) -> RlpResponseResult { + let hash: H256 = try!(r.val_at(0)); + trace!(target: "sync", "{} -> GetSnapshotData {:?}", peer_id, hash); + let rlp = match io.snapshot_service().chunk(hash) { + Some(data) => { + let mut rlp = RlpStream::new_list(1); + rlp.append(&data); + rlp + }, + None => { + let rlp = RlpStream::new_list(0); + rlp + } + }; + Ok(Some((SNAPSHOT_DATA_PACKET, rlp))) + } + fn return_rlp(io: &mut SyncIo, rlp: &UntrustedRlp, peer: PeerId, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError> where FRlp : Fn(&SyncIo, &UntrustedRlp, PeerId) -> RlpResponseResult, FError : FnOnce(NetworkError) -> String @@ -1266,6 +1500,14 @@ impl ChainSync { ChainSync::return_node_data, |e| format!("Error sending nodes: {:?}", e)), + GET_SNAPSHOT_MANIFEST_PACKET => ChainSync::return_rlp(io, &rlp, peer, + ChainSync::return_snapshot_manifest, + |e| format!("Error sending snapshot manifest: {:?}", e)), + + GET_SNAPSHOT_DATA_PACKET => ChainSync::return_rlp(io, &rlp, peer, + ChainSync::return_snapshot_data, + |e| format!("Error sending snapshot data: {:?}", e)), + _ => { sync.write().on_packet(io, peer, packet_id, data); Ok(()) @@ -1289,6 +1531,8 @@ impl ChainSync { BLOCK_BODIES_PACKET => self.on_peer_block_bodies(io, peer, &rlp), NEW_BLOCK_PACKET
=> self.on_peer_new_block(io, peer, &rlp), NEW_BLOCK_HASHES_PACKET => self.on_peer_new_hashes(io, peer, &rlp), + SNAPSHOT_MANIFEST_PACKET => self.on_snapshot_manifest(io, peer, &rlp), + SNAPSHOT_DATA_PACKET => self.on_snapshot_data(io, peer, &rlp), _ => { debug!(target: "sync", "Unknown packet {}", packet_id); Ok(()) @@ -1308,6 +1552,8 @@ impl ChainSync { PeerAsking::BlockBodies => (tick - peer.ask_time) > BODIES_TIMEOUT_SEC, PeerAsking::Nothing => false, PeerAsking::ForkHeader => (tick - peer.ask_time) > FORK_HEADER_TIMEOUT_SEC, + PeerAsking::SnapshotManifest => (tick - peer.ask_time) > SNAPSHOT_MANIFEST_TIMEOUT_SEC, + PeerAsking::SnapshotData => (tick - peer.ask_time) > SNAPSHOT_DATA_TIMEOUT_SEC, }; if timeout { trace!(target:"sync", "Timeout {}", peer_id); @@ -1321,9 +1567,12 @@ impl ChainSync { } fn check_resume(&mut self, io: &mut SyncIo) { - if !io.chain().queue_info().is_full() && self.state == SyncState::Waiting { + if self.state == SyncState::Waiting && !io.chain().queue_info().is_full() { self.state = SyncState::Blocks; self.continue_sync(io); + } else if self.state == SyncState::SnapshotWaiting && io.snapshot_service().status() == RestorationStatus::Inactive { + self.state = SyncState::Idle; + self.continue_sync(io); } } @@ -1559,6 +1808,7 @@ impl ChainSync { #[cfg(test)] mod tests { use tests::helpers::*; + use tests::snapshot::TestSnapshotService; use super::*; use ::SyncConfig; use util::*; @@ -1612,7 +1862,8 @@ mod tests { fn return_receipts_empty() { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]), 0); @@ -1624,7 +1875,8 @@ mod tests { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); let sync =
dummy_sync_with_peer(H256::new(), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut receipt_list = RlpStream::new_list(4); receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -1679,7 +1931,8 @@ mod tests { let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); let mut queue = VecDeque::new(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let unknown: H256 = H256::new(); let result = ChainSync::return_block_headers(&io, &UntrustedRlp::new(&make_hash_req(&unknown, 1, 0, false)), 0); @@ -1717,7 +1970,8 @@ mod tests { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); let sync = dummy_sync_with_peer(H256::new(), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut node_list = RlpStream::new_list(3); node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -1758,6 +2012,9 @@ mod tests { last_sent_transactions: HashSet::new(), expired: false, confirmation: super::ForkConfirmation::Confirmed, + snapshot_number: None, + snapshot_hash: None, + asking_snapshot_data: None, }); sync } @@ -1769,7 +2026,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); let chain_info = client.chain_info(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let lagging_peers = sync.get_lagging_peers(&chain_info, &io); @@ -1800,7 +2058,8 @@ mod tests { let mut queue = 
VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers); @@ -1820,7 +2079,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers); @@ -1840,7 +2100,8 @@ mod tests { let hash = client.block_hash(BlockID::Number(99)).unwrap(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers); @@ -1859,7 +2120,8 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); // Try to propagate same transactions for the second time let peer_count2 = sync.propagate_new_transactions(&mut io); @@ -1880,7 +2142,8 @@ mod 
tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]); // Try to propagate same transactions for the second time @@ -1903,17 +2166,17 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); + let mut ss = TestSnapshotService::new(); // should sent some { - - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); assert_eq!(1, io.queue.len()); assert_eq!(1, peer_count); } // Insert some more client.insert_transaction_to_queue(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); // Propagate new transactions let peer_count2 = sync.propagate_new_transactions(&mut io); // And now the peer should have all transactions @@ -1939,7 +2202,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); //sync.have_common_block = true; - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let block = UntrustedRlp::new(&block_data); @@ -1957,7 +2221,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = 
TestIo::new(&mut client, &mut ss, &mut queue, None); let block = UntrustedRlp::new(&block_data); @@ -1972,7 +2237,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let empty_data = vec![]; let block = UntrustedRlp::new(&empty_data); @@ -1988,7 +2254,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let hashes_data = get_dummy_hashes(); let hashes_rlp = UntrustedRlp::new(&hashes_data); @@ -2004,7 +2271,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let empty_hashes_data = vec![]; let hashes_rlp = UntrustedRlp::new(&empty_hashes_data); @@ -2023,7 +2291,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); sync.propagate_new_hashes(&chain_info, &mut io, &peers); @@ -2042,7 +2311,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = 
dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); sync.propagate_blocks(&chain_info, &mut io, &[], &peers); @@ -2076,7 +2346,8 @@ mod tests { // when { let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks); sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0); @@ -2090,7 +2361,8 @@ mod tests { } { let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks); sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]); } @@ -2114,7 +2386,8 @@ mod tests { let retracted_blocks = vec![client.block_hash_delta_minus(1)]; let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); // when sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); diff --git a/sync/src/infinity.rs b/sync/src/infinity.rs index 23886560e..810db32ed 100644 --- a/sync/src/infinity.rs +++ b/sync/src/infinity.rs @@ -18,6 +18,7 @@ use util::*; use network::*; +use rlp::{UntrustedRlp, DecoderError, RlpStream, View, Stream}; use ethcore::client::{BlockChainClient}; use sync_io::SyncIo; use super::SyncConfig; diff --git a/sync/src/lib.rs 
b/sync/src/lib.rs index 8e6ece69b..0fa7d9f42 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -26,40 +26,6 @@ //! Implements ethereum protocol version 63 as specified here: //! https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol //! -//! Usage example: -//! -//! ```rust -//! extern crate ethcore_util as util; -//! extern crate ethcore_io as io; -//! extern crate ethcore; -//! extern crate ethsync; -//! use std::env; -//! use io::IoChannel; -//! use ethcore::client::{Client, ClientConfig}; -//! use ethsync::{EthSync, SyncConfig, ManageNetwork, NetworkConfiguration}; -//! use ethcore::ethereum; -//! use ethcore::miner::{GasPricer, Miner}; -//! -//! fn main() { -//! let dir = env::temp_dir(); -//! let spec = ethereum::new_frontier(); -//! let miner = Miner::new( -//! Default::default(), -//! GasPricer::new_fixed(20_000_000_000u64.into()), -//! &spec, -//! None -//! ); -//! let client = Client::new( -//! ClientConfig::default(), -//! &spec, -//! &dir, -//! miner, -//! IoChannel::disconnected() -//! ).unwrap(); -//! let sync = EthSync::new(SyncConfig::default(), client, NetworkConfiguration::from(NetworkConfiguration::new())).unwrap(); -//! sync.start_network(); -//! } -//! ``` extern crate ethcore_network as network; extern crate ethcore_io as io; @@ -84,6 +50,7 @@ mod chain; mod blocks; mod sync_io; mod infinity; +mod snapshot; #[cfg(test)] mod tests; @@ -97,4 +64,3 @@ pub use api::{EthSync, SyncProvider, SyncClient, NetworkManagerClient, ManageNet ServiceConfiguration, NetworkConfiguration}; pub use chain::{SyncStatus, SyncState}; pub use network::{is_valid_node_url, NonReservedPeerMode, NetworkError}; - diff --git a/sync/src/snapshot.rs b/sync/src/snapshot.rs new file mode 100644 index 000000000..ca9adf220 --- /dev/null +++ b/sync/src/snapshot.rs @@ -0,0 +1,200 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + + +use util::{H256, Hashable}; +use std::collections::HashSet; +use ethcore::snapshot::ManifestData; + +#[derive(PartialEq, Eq, Debug)] +pub enum ChunkType { + State(H256), + Block(H256), +} + +pub struct Snapshot { + pending_state_chunks: Vec, + pending_block_chunks: Vec, + downloading_chunks: HashSet, + completed_chunks: HashSet, + snapshot_hash: Option, +} + +impl Snapshot { + /// Create a new instance. + pub fn new() -> Snapshot { + Snapshot { + pending_state_chunks: Vec::new(), + pending_block_chunks: Vec::new(), + downloading_chunks: HashSet::new(), + completed_chunks: HashSet::new(), + snapshot_hash: None, + } + } + + /// Clear everything. 
+ pub fn clear(&mut self) { + self.pending_state_chunks.clear(); + self.pending_block_chunks.clear(); + self.downloading_chunks.clear(); + self.completed_chunks.clear(); + self.snapshot_hash = None; + } + + /// Reset collection for a manifest RLP + pub fn reset_to(&mut self, manifest: &ManifestData, hash: &H256) { + self.clear(); + self.pending_state_chunks = manifest.state_hashes.clone(); + self.pending_block_chunks = manifest.block_hashes.clone(); + self.snapshot_hash = Some(hash.clone()); + } + + /// Validate chunk and mark it as downloaded + pub fn validate_chunk(&mut self, chunk: &[u8]) -> Result { + let hash = chunk.sha3(); + if self.completed_chunks.contains(&hash) { + trace!(target: "sync", "Ignored proccessed chunk: {}", hash.hex()); + return Err(()); + } + self.downloading_chunks.remove(&hash); + if self.pending_block_chunks.iter().any(|h| h == &hash) { + self.completed_chunks.insert(hash.clone()); + return Ok(ChunkType::Block(hash)); + } + if self.pending_state_chunks.iter().any(|h| h == &hash) { + self.completed_chunks.insert(hash.clone()); + return Ok(ChunkType::State(hash)); + } + trace!(target: "sync", "Ignored unknown chunk: {}", hash.hex()); + Err(()) + } + + /// Find a chunk to download + pub fn needed_chunk(&mut self) -> Option { + // check state chunks first + let mut chunk = self.pending_state_chunks.iter() + .find(|&h| !self.downloading_chunks.contains(h) && !self.completed_chunks.contains(h)) + .cloned(); + if chunk.is_none() { + chunk = self.pending_block_chunks.iter() + .find(|&h| !self.downloading_chunks.contains(h) && !self.completed_chunks.contains(h)) + .cloned(); + } + + if let Some(hash) = chunk { + self.downloading_chunks.insert(hash.clone()); + } + chunk + } + + pub fn clear_chunk_download(&mut self, hash: &H256) { + self.downloading_chunks.remove(hash); + } + + pub fn snapshot_hash(&self) -> Option { + self.snapshot_hash + } + + pub fn total_chunks(&self) -> usize { + self.pending_block_chunks.len() + 
self.pending_state_chunks.len() + } + + pub fn done_chunks(&self) -> usize { + self.total_chunks() - self.completed_chunks.len() + } + + pub fn is_complete(&self) -> bool { + self.total_chunks() == self.completed_chunks.len() + } +} + +#[cfg(test)] +mod test { + use util::*; + use super::*; + use ethcore::snapshot::ManifestData; + + fn is_empty(snapshot: &Snapshot) -> bool { + snapshot.pending_block_chunks.is_empty() && + snapshot.pending_state_chunks.is_empty() && + snapshot.completed_chunks.is_empty() && + snapshot.downloading_chunks.is_empty() && + snapshot.snapshot_hash.is_none() + } + + fn test_manifest() -> (ManifestData, H256, Vec, Vec) { + let state_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); + let block_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); + let manifest = ManifestData { + state_hashes: state_chunks.iter().map(|data| data.sha3()).collect(), + block_hashes: block_chunks.iter().map(|data| data.sha3()).collect(), + state_root: H256::new(), + block_number: 42, + block_hash: H256::new(), + }; + let mhash = manifest.clone().into_rlp().sha3(); + (manifest, mhash, state_chunks, block_chunks) + } + + #[test] + fn create_clear() { + let mut snapshot = Snapshot::new(); + assert!(is_empty(&snapshot)); + let (manifest, mhash, _, _,) = test_manifest(); + snapshot.reset_to(&manifest, &mhash); + assert!(!is_empty(&snapshot)); + snapshot.clear(); + assert!(is_empty(&snapshot)); + } + + #[test] + fn validate_chunks() { + let mut snapshot = Snapshot::new(); + let (manifest, mhash, state_chunks, block_chunks) = test_manifest(); + snapshot.reset_to(&manifest, &mhash); + assert!(snapshot.validate_chunk(&H256::random().to_vec()).is_err()); + + let requested: Vec = (0..40).map(|_| snapshot.needed_chunk().unwrap()).collect(); + assert!(snapshot.needed_chunk().is_none()); + assert_eq!(&requested[0..20], &manifest.state_hashes[..]); + assert_eq!(&requested[20..40], &manifest.block_hashes[..]); + 
assert_eq!(snapshot.downloading_chunks.len(), 40); + + assert_eq!(snapshot.validate_chunk(&state_chunks[4]), Ok(ChunkType::State(manifest.state_hashes[4].clone()))); + assert_eq!(snapshot.completed_chunks.len(), 1); + assert_eq!(snapshot.downloading_chunks.len(), 39); + + assert_eq!(snapshot.validate_chunk(&block_chunks[10]), Ok(ChunkType::Block(manifest.block_hashes[10].clone()))); + assert_eq!(snapshot.completed_chunks.len(), 2); + assert_eq!(snapshot.downloading_chunks.len(), 38); + + for (i, data) in state_chunks.iter().enumerate() { + if i != 4 { + assert!(snapshot.validate_chunk(data).is_ok()); + } + } + + for (i, data) in block_chunks.iter().enumerate() { + if i != 10 { + assert!(snapshot.validate_chunk(data).is_ok()); + } + } + + assert!(snapshot.is_complete()); + assert_eq!(snapshot.snapshot_hash(), Some(manifest.into_rlp().sha3())); + } +} + diff --git a/sync/src/sync_io.rs b/sync/src/sync_io.rs index 91070adc5..fa95941ea 100644 --- a/sync/src/sync_io.rs +++ b/sync/src/sync_io.rs @@ -16,6 +16,8 @@ use network::{NetworkContext, PeerId, PacketId, NetworkError}; use ethcore::client::BlockChainClient; +use ethcore::snapshot::SnapshotService; +use api::ETH_PROTOCOL; /// IO interface for the syning handler. /// Provides peer connection management and an interface to the blockchain client. @@ -31,10 +33,14 @@ pub trait SyncIo { fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec) -> Result<(), NetworkError>; /// Get the blockchain fn chain(&self) -> &BlockChainClient; + /// Get the snapshot service. 
+ fn snapshot_service(&self) -> &SnapshotService; /// Returns peer client identifier string fn peer_info(&self, peer_id: PeerId) -> String { peer_id.to_string() } + /// Maximum mutuallt supported ETH protocol version + fn eth_protocol_version(&self, peer_id: PeerId) -> u8; /// Returns if the chain block queue empty fn is_chain_queue_empty(&self) -> bool { self.chain().queue_info().is_empty() @@ -46,15 +52,17 @@ pub trait SyncIo { /// Wraps `NetworkContext` and the blockchain client pub struct NetSyncIo<'s, 'h> where 'h: 's { network: &'s NetworkContext<'h>, - chain: &'s BlockChainClient + chain: &'s BlockChainClient, + snapshot_service: &'s SnapshotService, } impl<'s, 'h> NetSyncIo<'s, 'h> { /// Creates a new instance from the `NetworkContext` and the blockchain client reference. - pub fn new(network: &'s NetworkContext<'h>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> { + pub fn new(network: &'s NetworkContext<'h>, chain: &'s BlockChainClient, snapshot_service: &'s SnapshotService) -> NetSyncIo<'s, 'h> { NetSyncIo { network: network, chain: chain, + snapshot_service: snapshot_service, } } } @@ -80,6 +88,10 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { self.chain } + fn snapshot_service(&self) -> &SnapshotService { + self.snapshot_service + } + fn peer_info(&self, peer_id: PeerId) -> String { self.network.peer_info(peer_id) } @@ -87,6 +99,10 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { fn is_expired(&self) -> bool { self.network.is_expired() } + + fn eth_protocol_version(&self, peer_id: PeerId) -> u8 { + self.network.protocol_version(peer_id, ETH_PROTOCOL).unwrap_or(0) + } } diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index fba57681d..cbed49eff 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -16,22 +16,26 @@ use util::*; use network::*; +use tests::snapshot::*; use ethcore::client::{TestBlockChainClient, BlockChainClient}; use ethcore::header::BlockNumber; +use ethcore::snapshot::SnapshotService; use 
sync_io::SyncIo; use chain::ChainSync; use ::SyncConfig; pub struct TestIo<'p> { pub chain: &'p mut TestBlockChainClient, + pub snapshot_service: &'p TestSnapshotService, pub queue: &'p mut VecDeque, pub sender: Option, } impl<'p> TestIo<'p> { - pub fn new(chain: &'p mut TestBlockChainClient, queue: &'p mut VecDeque, sender: Option) -> TestIo<'p> { + pub fn new(chain: &'p mut TestBlockChainClient, ss: &'p TestSnapshotService, queue: &'p mut VecDeque, sender: Option) -> TestIo<'p> { TestIo { chain: chain, + snapshot_service: ss, queue: queue, sender: sender } @@ -70,6 +74,14 @@ impl<'p> SyncIo for TestIo<'p> { fn chain(&self) -> &BlockChainClient { self.chain } + + fn snapshot_service(&self) -> &SnapshotService { + self.snapshot_service + } + + fn eth_protocol_version(&self, _peer: PeerId) -> u8 { + 64 + } } pub struct TestPacket { @@ -80,6 +92,7 @@ pub struct TestPacket { pub struct TestPeer { pub chain: TestBlockChainClient, + pub snapshot_service: Arc, pub sync: RwLock, pub queue: VecDeque, } @@ -103,9 +116,11 @@ impl TestNet { let chain = TestBlockChainClient::new(); let mut config = SyncConfig::default(); config.fork_block = fork; + let ss = Arc::new(TestSnapshotService::new()); let sync = ChainSync::new(config, &chain); net.peers.push(TestPeer { sync: RwLock::new(sync), + snapshot_service: ss, chain: chain, queue: VecDeque::new(), }); @@ -126,7 +141,7 @@ impl TestNet { for client in 0..self.peers.len() { if peer != client { let mut p = self.peers.get_mut(peer).unwrap(); - p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(client as PeerId)), client as PeerId); + p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)), client as PeerId); } } } @@ -137,22 +152,22 @@ impl TestNet { if let Some(packet) = self.peers[peer].queue.pop_front() { let mut p = self.peers.get_mut(packet.recipient).unwrap(); trace!("--- {} -> {} ---", peer, packet.recipient); - 
ChainSync::dispatch_packet(&p.sync, &mut TestIo::new(&mut p.chain, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); + ChainSync::dispatch_packet(&p.sync, &mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); trace!("----------------"); } let mut p = self.peers.get_mut(peer).unwrap(); - p.sync.write().maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None)); + p.sync.write().maintain_sync(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, None)); } } pub fn sync_step_peer(&mut self, peer_num: usize) { let mut peer = self.peer_mut(peer_num); - peer.sync.write().maintain_sync(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None)); + peer.sync.write().maintain_sync(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None)); } pub fn restart_peer(&mut self, i: usize) { let peer = self.peer_mut(i); - peer.sync.write().restart(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None)); + peer.sync.write().restart(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None)); } pub fn sync(&mut self) -> u32 { @@ -181,6 +196,6 @@ impl TestNet { pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { let mut peer = self.peer_mut(peer_id); - peer.sync.write().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[], &[]); + peer.sync.write().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None), &[], &[], &[], &[], &[]); } } diff --git a/sync/src/tests/mod.rs b/sync/src/tests/mod.rs index 5afda05f0..bdb4ae4f9 100644 --- a/sync/src/tests/mod.rs +++ b/sync/src/tests/mod.rs @@ -15,5 +15,6 @@ // along with Parity. If not, see . 
pub mod helpers; +pub mod snapshot; mod chain; mod rpc; diff --git a/sync/src/tests/snapshot.rs b/sync/src/tests/snapshot.rs new file mode 100644 index 000000000..b27602b0d --- /dev/null +++ b/sync/src/tests/snapshot.rs @@ -0,0 +1,123 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use util::*; +use ethcore::snapshot::{SnapshotService, ManifestData, RestorationStatus}; +use ethcore::header::BlockNumber; +use ethcore::client::{EachBlockWith}; +use super::helpers::*; + +pub struct TestSnapshotService { + manifest: Option, + chunks: HashMap, + + restoration_manifest: Mutex>, + state_restoration_chunks: Mutex>, + block_restoration_chunks: Mutex>, +} + +impl TestSnapshotService { + pub fn new() -> TestSnapshotService { + TestSnapshotService { + manifest: None, + chunks: HashMap::new(), + restoration_manifest: Mutex::new(None), + state_restoration_chunks: Mutex::new(HashMap::new()), + block_restoration_chunks: Mutex::new(HashMap::new()), + } + } + + pub fn new_with_snapshot(num_chunks: usize, block_hash: H256, block_number: BlockNumber) -> TestSnapshotService { + let num_state_chunks = num_chunks / 2; + let num_block_chunks = num_chunks - num_state_chunks; + let state_chunks: Vec = (0..num_state_chunks).map(|_| H256::random().to_vec()).collect(); + let block_chunks: Vec = (0..num_block_chunks).map(|_| H256::random().to_vec()).collect(); + let 
manifest = ManifestData { + state_hashes: state_chunks.iter().map(|data| data.sha3()).collect(), + block_hashes: block_chunks.iter().map(|data| data.sha3()).collect(), + state_root: H256::new(), + block_number: block_number, + block_hash: block_hash, + }; + let mut chunks: HashMap = state_chunks.into_iter().map(|data| (data.sha3(), data)).collect(); + chunks.extend(block_chunks.into_iter().map(|data| (data.sha3(), data))); + TestSnapshotService { + manifest: Some(manifest), + chunks: chunks, + restoration_manifest: Mutex::new(None), + state_restoration_chunks: Mutex::new(HashMap::new()), + block_restoration_chunks: Mutex::new(HashMap::new()), + } + } +} + +impl SnapshotService for TestSnapshotService { + fn manifest(&self) -> Option { + self.manifest.as_ref().cloned() + } + + fn chunk(&self, hash: H256) -> Option { + self.chunks.get(&hash).cloned() + } + + fn status(&self) -> RestorationStatus { + match &*self.restoration_manifest.lock() { + &Some(ref manifest) if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() && + self.block_restoration_chunks.lock().len() == manifest.block_hashes.len() => RestorationStatus::Inactive, + &Some(_) => RestorationStatus::Ongoing { + state_chunks_done: self.state_restoration_chunks.lock().len() as u32, + block_chunks_done: self.block_restoration_chunks.lock().len() as u32, + }, + &None => RestorationStatus::Inactive, + } + } + + fn begin_restore(&self, manifest: ManifestData) { + *self.restoration_manifest.lock() = Some(manifest); + self.state_restoration_chunks.lock().clear(); + self.block_restoration_chunks.lock().clear(); + } + + fn abort_restore(&self) { + *self.restoration_manifest.lock() = None; + self.state_restoration_chunks.lock().clear(); + self.block_restoration_chunks.lock().clear(); + } + + fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { + if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.state_hashes.iter().any(|h| h == &hash)) { + 
self.state_restoration_chunks.lock().insert(hash, chunk); + } + } + + fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { + if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.block_hashes.iter().any(|h| h == &hash)) { + self.block_restoration_chunks.lock().insert(hash, chunk); + } + } +} + +#[test] +fn snapshot_sync() { + ::env_logger::init().ok(); + let mut net = TestNet::new(2); + net.peer_mut(0).snapshot_service = Arc::new(TestSnapshotService::new_with_snapshot(16, H256::new(), 1)); + net.peer_mut(0).chain.add_blocks(1, EachBlockWith::Nothing); + net.sync_steps(19); // status + manifest + chunks + assert_eq!(net.peer(1).snapshot_service.state_restoration_chunks.lock().len(), net.peer(0).snapshot_service.manifest.as_ref().unwrap().state_hashes.len()); + assert_eq!(net.peer(1).snapshot_service.block_restoration_chunks.lock().len(), net.peer(0).snapshot_service.manifest.as_ref().unwrap().block_hashes.len()); +} + diff --git a/util/network/src/host.rs b/util/network/src/host.rs index 359f54f1a..ebc10324f 100644 --- a/util/network/src/host.rs +++ b/util/network/src/host.rs @@ -282,6 +282,12 @@ impl<'s> NetworkContext<'s> { } "unknown".to_owned() } + + /// Returns max version for a given protocol. 
+ pub fn protocol_version(&self, peer: PeerId, protocol: &str) -> Option { + let session = self.resolve_session(peer); + session.and_then(|s| s.lock().capability_version(protocol)) + } } /// Shared host information diff --git a/util/network/src/session.rs b/util/network/src/session.rs index 8ebd37090..164248d62 100644 --- a/util/network/src/session.rs +++ b/util/network/src/session.rs @@ -243,6 +243,11 @@ impl Session { self.info.capabilities.iter().any(|c| c.protocol == protocol) } + /// Checks if peer supports given capability + pub fn capability_version(&self, protocol: &str) -> Option { + self.info.capabilities.iter().filter_map(|c| if c.protocol == protocol { Some(c.version) } else { None }).max() + } + /// Register the session socket with the event loop pub fn register_socket>(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), NetworkError> { if self.expired() { diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 5db7801a1..177df5fa0 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -16,9 +16,11 @@ //! Key-Value store abstraction with `RocksDB` backend. +use std::io::ErrorKind; use common::*; use elastic_array::*; use std::default::Default; +use std::path::PathBuf; use rlp::{UntrustedRlp, RlpType, View, Compressible}; use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator, Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column}; @@ -189,12 +191,18 @@ impl<'a> Iterator for DatabaseIterator { } } +struct DBAndColumns { + db: DB, + cfs: Vec, +} + /// Key-Value database. 
pub struct Database { - db: DB, + db: RwLock>, + config: DatabaseConfig, write_opts: WriteOptions, - cfs: Vec, overlay: RwLock, KeyState>>>, + path: String, } impl Database { @@ -278,11 +286,13 @@ impl Database { }, Err(s) => { return Err(s); } }; + let num_cols = cfs.len(); Ok(Database { - db: db, + db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })), + config: config.clone(), write_opts: write_opts, - overlay: RwLock::new((0..(cfs.len() + 1)).map(|_| HashMap::new()).collect()), - cfs: cfs, + overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), + path: path.to_owned(), }) } @@ -320,94 +330,167 @@ impl Database { /// Commit buffered changes to database. pub fn flush(&self) -> Result<(), String> { - let batch = WriteBatch::new(); - let mut overlay = self.overlay.write(); + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let batch = WriteBatch::new(); + let mut overlay = self.overlay.write(); - for (c, column) in overlay.iter_mut().enumerate() { - let column_data = mem::replace(column, HashMap::new()); - for (key, state) in column_data.into_iter() { - match state { - KeyState::Delete => { - if c > 0 { - try!(batch.delete_cf(self.cfs[c - 1], &key)); - } else { - try!(batch.delete(&key)); - } - }, - KeyState::Insert(value) => { - if c > 0 { - try!(batch.put_cf(self.cfs[c - 1], &key, &value)); - } else { - try!(batch.put(&key, &value)); - } - }, - KeyState::InsertCompressed(value) => { - let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); - if c > 0 { - try!(batch.put_cf(self.cfs[c - 1], &key, &compressed)); - } else { - try!(batch.put(&key, &value)); + for (c, column) in overlay.iter_mut().enumerate() { + let column_data = mem::replace(column, HashMap::new()); + for (key, state) in column_data.into_iter() { + match state { + KeyState::Delete => { + if c > 0 { + try!(batch.delete_cf(cfs[c - 1], &key)); + } else { + try!(batch.delete(&key)); + } + }, + KeyState::Insert(value) => { + if c > 0 { + 
try!(batch.put_cf(cfs[c - 1], &key, &value)); + } else { + try!(batch.put(&key, &value)); + } + }, + KeyState::InsertCompressed(value) => { + let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); + if c > 0 { + try!(batch.put_cf(cfs[c - 1], &key, &compressed)); + } else { + try!(batch.put(&key, &value)); + } + } } } } - } + db.write_opt(batch, &self.write_opts) + }, + &None => Err("Database is closed".to_owned()) } - self.db.write_opt(batch, &self.write_opts) } /// Commit transaction to database. pub fn write(&self, tr: DBTransaction) -> Result<(), String> { - let batch = WriteBatch::new(); - let ops = tr.ops; - for op in ops { - match op { - DBOp::Insert { col, key, value } => { - try!(col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(self.cfs[c as usize], &key, &value))) - }, - DBOp::InsertCompressed { col, key, value } => { - let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); - try!(col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(self.cfs[c as usize], &key, &compressed))) - }, - DBOp::Delete { col, key } => { - try!(col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(self.cfs[c as usize], &key))) - }, - } + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let batch = WriteBatch::new(); + let ops = tr.ops; + for op in ops { + match op { + DBOp::Insert { col, key, value } => { + try!(col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))) + }, + DBOp::InsertCompressed { col, key, value } => { + let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); + try!(col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))) + }, + DBOp::Delete { col, key } => { + try!(col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))) + }, + } + } + db.write_opt(batch, &self.write_opts) + }, + &None => Err("Database is closed".to_owned()) } - self.db.write_opt(batch, 
&self.write_opts) } /// Get value by key. pub fn get(&self, col: Option, key: &[u8]) -> Result, String> { - let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; - match overlay.get(key) { - Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - col.map_or_else( - || self.db.get(key).map(|r| r.map(|v| v.to_vec())), - |c| self.db.get_cf(self.cfs[c as usize], key).map(|r| r.map(|v| v.to_vec()))) + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; + match overlay.get(key) { + Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())), + Some(&KeyState::Delete) => Ok(None), + None => { + col.map_or_else( + || db.get(key).map(|r| r.map(|v| v.to_vec())), + |c| db.get_cf(cfs[c as usize], key).map(|r| r.map(|v| v.to_vec()))) + }, + } }, + &None => Ok(None), } } /// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values. - // TODO: support prefix seek for unflushed ata + // TODO: support prefix seek for unflushed data pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - let mut iter = col.map_or_else(|| self.db.iterator(IteratorMode::From(prefix, Direction::Forward)), - |c| self.db.iterator_cf(self.cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap()); - match iter.next() { - // TODO: use prefix_same_as_start read option (not availabele in C API currently) - Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] 
{ Some(v) } else { None }, - _ => None + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let mut iter = col.map_or_else(|| db.iterator(IteratorMode::From(prefix, Direction::Forward)), + |c| db.iterator_cf(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap()); + match iter.next() { + // TODO: use prefix_same_as_start read option (not availabele in C API currently) + Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, + _ => None + } + }, + &None => None, } } /// Get database iterator for flushed data. pub fn iter(&self, col: Option) -> DatabaseIterator { //TODO: iterate over overlay - col.map_or_else(|| DatabaseIterator { iter: self.db.iterator(IteratorMode::Start) }, - |c| DatabaseIterator { iter: self.db.iterator_cf(self.cfs[c as usize], IteratorMode::Start).unwrap() }) + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + col.map_or_else(|| DatabaseIterator { iter: db.iterator(IteratorMode::Start) }, + |c| DatabaseIterator { iter: db.iterator_cf(cfs[c as usize], IteratorMode::Start).unwrap() }) + }, + &None => panic!("Not supported yet") //TODO: return an empty iterator or change return type + } + } + + /// Close the database + fn close(&self) { + *self.db.write() = None; + self.overlay.write().clear(); + } + + /// Restore the database from a copy at given path. + pub fn restore(&self, new_db: &str) -> Result<(), UtilError> { + self.close(); + + let mut backup_db = PathBuf::from(&self.path); + backup_db.pop(); + backup_db.push("backup_db"); + println!("Path at {:?}", self.path); + println!("Backup at {:?}", backup_db); + + let existed = match fs::rename(&self.path, &backup_db) { + Ok(_) => true, + Err(e) => if let ErrorKind::NotFound = e.kind() { + false + } else { + return Err(e.into()); + } + }; + + match fs::rename(&new_db, &self.path) { + Ok(_) => { + // clean up the backup. 
+ if existed { + try!(fs::remove_dir_all(&backup_db)); + } + } + Err(e) => { + // restore the backup. + if existed { + try!(fs::rename(&backup_db, &self.path)); + } + return Err(e.into()) + } + } + + // reopen the database and steal handles into self + let db = try!(Self::open(&self.config, &self.path)); + *self.db.write() = mem::replace(&mut *db.db.write(), None); + *self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new()); + Ok(()) } } diff --git a/util/src/misc.rs b/util/src/misc.rs index 62e8542db..50b2e7e8d 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -16,7 +16,6 @@ //! Diff misc. -use std::fs::File; use common::*; use rlp::{Stream, RlpStream}; use target_info::Target; @@ -33,14 +32,6 @@ pub enum Filth { Dirty, } -/// Read the whole contents of a file `name`. -pub fn contents(name: &str) -> Result { - let mut file = try!(File::open(name)); - let mut ret: Vec = Vec::new(); - try!(file.read_to_end(&mut ret)); - Ok(ret) -} - /// Get the standard version string for this software. pub fn version() -> String { let sha3 = short_sha(); @@ -64,4 +55,4 @@ pub fn version_data() -> Bytes { s.append(&rustc_version()); s.append(&&Target::os()[0..2]); s.out() -} \ No newline at end of file +}