From 04dee54cb3b52342911a8ac7b0cf571f19c21ce4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 2 Sep 2016 16:15:25 +0200 Subject: [PATCH 01/17] add take_snapshot to snapshot service --- ethcore/src/service.rs | 7 +++++ ethcore/src/snapshot/service.rs | 46 +++++++++++++++++++++++++++------ 2 files changed, 45 insertions(+), 8 deletions(-) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index e2e4772a4..de32256dd 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -46,6 +46,8 @@ pub enum ClientIoMessage { FeedStateChunk(H256, Bytes), /// Feed a block chunk to the snapshot service FeedBlockChunk(H256, Bytes), + /// Take a snapshot for the block with given number. + TakeSnapshot(u64), } /// Client service setup. Creates and registers client and network services with the IO subsystem. @@ -170,6 +172,11 @@ impl IoHandler for ClientIoHandler { } ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => self.snapshot.feed_state_chunk(*hash, chunk), ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => self.snapshot.feed_block_chunk(*hash, chunk), + ClientIoMessage::TakeSnapshot(num) => { + if let Err(e) = self.snapshot.take_snapshot(&*self.client, num) { + warn!("Failed to take snapshot at block #{}: {}", num, e); + } + } _ => {} // ignore other messages } } diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 9f2b3f34a..d2a9be046 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -27,8 +27,10 @@ use super::{ManifestData, StateRebuilder, BlockRebuilder}; use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::BlockChain; +use client::Client; use engines::Engine; use error::Error; +use ids::BlockID; use service::ClientIoMessage; use spec::Spec; @@ -269,6 +271,13 @@ impl Service { dir } + // get the temporary snapshot dir. + fn temp_snapshot_dir(&self) -> PathBuf { + let mut dir = self.root_dir(); + dir.push("in_progress"); + dir + } + // get the restoration directory. fn restoration_dir(&self) -> PathBuf { let mut dir = self.root_dir(); @@ -328,6 +337,34 @@ impl Service { } } + /// Take a snapshot at the block with the given number. + /// calling this while a restoration is in progress or vice versa + /// will lead to a race condition where the first one to finish will + /// have their produced snapshot overwritten. + pub fn take_snapshot(&self, client: &Client, num: u64) -> Result<(), Error> { + info!("Taking snapshot at #{}", num); + + let temp_dir = self.temp_snapshot_dir(); + let snapshot_dir = self.snapshot_dir(); + + let _ = fs::remove_dir_all(&temp_dir); + let writer = try!(LooseWriter::new(temp_dir.clone())); + let progress = Default::default(); + + // Todo [rob] log progress. + try!(client.take_snapshot(writer, BlockID::Number(num), &progress)); + let mut reader = self.reader.write(); + + // destroy the old snapshot reader. + *reader = None; + + try!(fs::rename(temp_dir, &snapshot_dir)); + + *reader = Some(try!(LooseReader::new(snapshot_dir))); + + Ok(()) + } + /// Initialize the restoration synchronously. 
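The take_snapshot routine above follows a write-to-temporary-then-publish pattern: the snapshot is produced in the "in_progress" directory, the old reader is dropped while the write lock is held, and only then is the finished directory renamed into place and a fresh LooseReader opened over it. A minimal standalone sketch of that pattern using only std::fs (the names and the cleanup-before-rename step are illustrative, not the service's actual layout or API):

    use std::fs;
    use std::io;
    use std::path::Path;

    // Build `output` inside a scratch directory and publish it with a single rename,
    // so a failed attempt never leaves a half-written final directory behind.
    fn produce_dir<F>(scratch: &Path, output: &Path, build: F) -> io::Result<()>
        where F: FnOnce(&Path) -> io::Result<()>
    {
        let _ = fs::remove_dir_all(scratch);  // clear any stale previous attempt
        fs::create_dir_all(scratch)?;
        build(scratch)?;                      // the fallible snapshot-writing work
        let _ = fs::remove_dir_all(output);   // retire the old version before publishing
        fs::rename(scratch, output)
    }

As the doc comment warns, nothing serializes this against an in-progress restoration; whichever of the two finishes last wins the final rename.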
pub fn init_restore(&self, manifest: ManifestData) -> Result<(), Error> { let rest_dir = self.restoration_dir(); @@ -393,14 +430,7 @@ impl Service { try!(fs::create_dir(&snapshot_dir)); trace!(target: "snapshot", "copying restored snapshot files over"); - for maybe_file in try!(fs::read_dir(self.temp_recovery_dir())) { - let path = try!(maybe_file).path(); - if let Some(name) = path.file_name().map(|x| x.to_owned()) { - let mut new_path = snapshot_dir.clone(); - new_path.push(name); - try!(fs::rename(path, new_path)); - } - } + try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir)); let _ = fs::remove_dir_all(self.restoration_dir()); From e3749b3bc4f31b0ebcc7c752161bd7dd1db413d6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 2 Sep 2016 18:28:47 +0200 Subject: [PATCH 02/17] implement snapshot watcher --- ethcore/src/client/chain_notify.rs | 3 +- ethcore/src/snapshot/mod.rs | 2 + ethcore/src/snapshot/watcher.rs | 177 +++++++++++++++++++++++++++++ 3 files changed, 181 insertions(+), 1 deletion(-) create mode 100644 ethcore/src/snapshot/watcher.rs diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index 897c8cfac..737cd0153 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -20,7 +20,8 @@ use util::H256; /// Represents what has to be handled by actor listening to chain events #[derive(Ipc)] pub trait ChainNotify : Send + Sync { - /// fires when chain has new blocks + /// fires when chain has new blocks, not including those encountered during + /// a major sync. fn new_blocks(&self, _imported: Vec, _invalid: Vec, diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 89e4ed8ba..979089331 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -45,6 +45,7 @@ use rand::{Rng, OsRng}; pub use self::error::Error; pub use self::service::{RestorationStatus, Service, SnapshotService}; +pub use self::watcher::Watcher; pub mod io; pub mod service; @@ -52,6 +53,7 @@ pub mod service; mod account; mod block; mod error; +mod watcher; #[cfg(test)] mod tests; diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs new file mode 100644 index 000000000..d2dd1d7dc --- /dev/null +++ b/ethcore/src/snapshot/watcher.rs @@ -0,0 +1,177 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Watcher for snapshot-related chain events. + +use client::{BlockChainClient, Client, ChainNotify}; +use ids::BlockID; +use service::ClientIoMessage; +use views::HeaderView; + +use io::IoChannel; +use util::hash::H256; + +use std::sync::Arc; + +// helper trait for transforming hashes to numbers. 
+trait HashToNumber: Send + Sync { + fn to_number(&self, hash: H256) -> Option; +} + +impl HashToNumber for Client { + fn to_number(&self, hash: H256) -> Option { + self.block_header(BlockID::Hash(hash)).map(|h| HeaderView::new(&h).number()) + } +} + +/// A `ChainNotify` implementation which will trigger a snapshot event +/// at certain block numbers. +pub struct Watcher { + oracle: Arc, + channel: IoChannel, + period: u64, + history: u64, +} + +impl Watcher { + /// Create a new `Watcher` which will trigger a snapshot event + /// once every `period` blocks, but only after that block is + /// `history` blocks old. + pub fn new(client: Arc, channel: IoChannel, period: u64, history: u64) -> Self { + Watcher { + oracle: client, + channel: channel, + period: period, + history: history, + } + } +} + +impl ChainNotify for Watcher { + fn new_blocks( + &self, + imported: Vec, + _: Vec, + _: Vec, + _: Vec, + _: Vec, + _duration: u64) + { + + let highest = imported.into_iter() + .filter_map(|h| self.oracle.to_number(h)) + .filter(|&num| num >= self.period + self.history) + .map(|num| num - self.history) + .filter(|num| num % self.period == 0) + .fold(0, ::std::cmp::max); + + if highest != 0 { + if let Err(e) = self.channel.send(ClientIoMessage::TakeSnapshot(highest)) { + warn!("Snapshot watcher disconnected from IoService: {}", e); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::{HashToNumber, Watcher}; + + use client::ChainNotify; + use service::ClientIoMessage; + + use util::{H256, U256, Mutex}; + use io::{IoContext, IoHandler, IoService}; + + use std::collections::HashMap; + use std::sync::Arc; + + struct TestOracle(HashMap); + + impl HashToNumber for TestOracle { + fn to_number(&self, hash: H256) -> Option { + self.0.get(&hash).cloned() + } + } + + struct Handler(Arc>>); + + impl IoHandler for Handler { + fn message(&self, _context: &IoContext, message: &ClientIoMessage) { + match *message { + ClientIoMessage::TakeSnapshot(num) => self.0.lock().push(num), + _ => {} + } + } + } + + // helper harness for tests. + fn harness(numbers: Vec, period: u64, history: u64) -> Vec { + let events = Arc::new(Mutex::new(Vec::new())); + + let service = IoService::start().unwrap(); + service.register_handler(Arc::new(Handler(events.clone()))).unwrap(); + + let hashes: Vec<_> = numbers.clone().into_iter().map(|x| H256::from(U256::from(x))).collect(); + let mut map = hashes.clone().into_iter().zip(numbers).collect(); + + let watcher = Watcher { + oracle: Arc::new(TestOracle(map)), + channel: service.channel(), + period: period, + history: history, + }; + + watcher.new_blocks( + hashes, + vec![], + vec![], + vec![], + vec![], + 0, + ); + + drop(service); + + // binding necessary for compilation. 
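The filter chain in new_blocks above reduces a batch of imported blocks to at most one snapshot request: a block qualifies once it is at least `history` blocks old and the block that would be snapshotted sits on a `period` boundary, and only the highest qualifying number is broadcast. The same logic as a standalone function over plain block numbers (using Option in place of the 0 sentinel the fold uses), checked against the same cases as the tests that follow:

    /// Pick the block to snapshot, if any, out of a batch of imported block numbers.
    fn snapshot_target(imported: &[u64], period: u64, history: u64) -> Option<u64> {
        imported.iter()
            .cloned()
            .filter(|&num| num >= period + history) // old enough to snapshot `num - history`
            .map(|num| num - history)               // the block that would be snapshotted
            .filter(|num| num % period == 0)        // only on period boundaries
            .max()
    }

    fn main() {
        // period = 10, history = 5, as in the tests:
        assert_eq!(snapshot_target(&[14, 15], 10, 5), Some(10)); // 15 is old enough for 10
        assert_eq!(snapshot_target(&[15, 25], 10, 5), Some(20)); // highest qualifying wins
        assert_eq!(snapshot_target(&[10, 11], 10, 5), None);     // still within the history window
    }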
+ let v = events.lock().clone(); + v + } + + #[test] + fn should_not_fire() { + let events = harness(vec![0], 5, 0); + assert_eq!(events, vec![]); + } + + #[test] + fn fires_once_for_two() { + let events = harness(vec![14, 15], 10, 5); + assert_eq!(events, vec![10]); + } + + #[test] + fn finds_highest() { + let events = harness(vec![15, 25], 10, 5); + assert_eq!(events, vec![20]); + } + + #[test] + fn doesnt_fire_before_history() { + let events = harness(vec![10, 11], 10, 5); + assert_eq!(events, vec![]); + } +} \ No newline at end of file From 1c450f616d59b8a001e22b892e7476a016f7883f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 2 Sep 2016 18:48:07 +0200 Subject: [PATCH 03/17] register the watcher as a ChainNotify --- parity/run.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/parity/run.rs b/parity/run.rs index 71995cd5f..2d619d350 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -28,6 +28,7 @@ use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNoti use ethcore::service::ClientService; use ethcore::account_provider::AccountProvider; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; +use ethcore::snapshot; use ethsync::SyncConfig; use informant::Informant; @@ -46,6 +47,9 @@ use rpc_apis; use rpc; use url; +const SNAPSHOT_PERIOD: u64 = 10000; +const SNAPSHOT_HISTORY: u64 = 1000; + #[derive(Debug, PartialEq)] pub struct RunCmd { pub cache_config: CacheConfig, @@ -249,6 +253,15 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { }); service.register_io_handler(io_handler).expect("Error registering IO handler"); + let watcher = snapshot::Watcher::new( + service.client(), + service.io().channel(), + SNAPSHOT_PERIOD, + SNAPSHOT_HISTORY, + ); + + service.add_notify(Arc::new(watcher)); + // start ui if cmd.ui { if !cmd.dapps_conf.enabled { From d9eb87cae7817253d04822f2289420caabe0b8c3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 2 Sep 2016 19:00:20 +0200 Subject: [PATCH 04/17] add guard for temporary directories --- ethcore/src/snapshot/service.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index d2a9be046..449ced23f 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -41,6 +41,23 @@ use util::journaldb::Algorithm; use util::kvdb::{Database, DatabaseConfig}; use util::snappy; +/// Helper for removing directories in case of error. +struct Guard(bool, PathBuf); + +impl Guard { + fn new(path: PathBuf) -> Self { Guard(true, path) } + + fn disarm(mut self) { self.0 = false } +} + +impl Drop for Guard { + fn drop(&mut self) { + if self.0 { + let _ = fs::remove_dir_all(&self.1); + } + } +} + /// Statuses for restorations. #[derive(PartialEq, Clone, Copy, Debug)] pub enum RestorationStatus { @@ -98,6 +115,7 @@ struct Restoration { writer: LooseWriter, snappy_buffer: Bytes, final_state_root: H256, + guard: Guard, } struct RestorationParams<'a> { @@ -106,6 +124,7 @@ struct RestorationParams<'a> { db_path: PathBuf, // database path writer: LooseWriter, // writer for recovered snapshot. genesis: &'a [u8], // genesis block of the chain. + guard: Guard, // guard for the restoration directory. 
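Guard is a small RAII helper: it deletes the directory it was given when it goes out of scope, unless disarm() was called first, so every early return via try! automatically cleans up a partially written restoration or snapshot directory. A compact, self-contained sketch of the same idea (the directory handling and the fallible body are placeholders):

    use std::fs;
    use std::io;
    use std::path::{Path, PathBuf};

    // Removes its directory on drop unless disarmed, mirroring the Guard in the patch.
    struct Guard(bool, PathBuf);

    impl Guard {
        fn new(path: PathBuf) -> Self { Guard(true, path) }
        fn disarm(mut self) { self.0 = false }
    }

    impl Drop for Guard {
        fn drop(&mut self) {
            if self.0 {
                let _ = fs::remove_dir_all(&self.1);
            }
        }
    }

    fn build_dir(dir: PathBuf) -> io::Result<()> {
        fs::create_dir_all(&dir)?;
        let guard = Guard::new(dir.clone());
        fallible_work(&dir)?;   // an early Err drops `guard`, which removes `dir`
        guard.disarm();         // success: keep the directory
        Ok(())
    }

    fn fallible_work(_dir: &Path) -> io::Result<()> { Ok(()) }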
} impl Restoration { @@ -133,6 +152,7 @@ impl Restoration { writer: params.writer, snappy_buffer: Vec::new(), final_state_root: root, + guard: params.guard, }) } @@ -181,6 +201,7 @@ impl Restoration { try!(self.writer.finish(self.manifest)); + self.guard.disarm(); Ok(()) } @@ -348,10 +369,12 @@ impl Service { let snapshot_dir = self.snapshot_dir(); let _ = fs::remove_dir_all(&temp_dir); + let writer = try!(LooseWriter::new(temp_dir.clone())); let progress = Default::default(); // Todo [rob] log progress. + let guard = Guard::new(temp_dir.clone()); try!(client.take_snapshot(writer, BlockID::Number(num), &progress)); let mut reader = self.reader.write(); @@ -362,6 +385,7 @@ impl Service { *reader = Some(try!(LooseReader::new(snapshot_dir))); + guard.disarm(); Ok(()) } @@ -393,6 +417,7 @@ impl Service { db_path: self.restoration_db(), writer: writer, genesis: &self.genesis_block, + guard: Guard::new(rest_dir), }; *res = Some(try!(Restoration::new(params))); From a0541738aba2792a6eaf50af563946be32fc7408 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 2 Sep 2016 20:24:59 +0200 Subject: [PATCH 05/17] disabling of periodic snapshots with the --no-periodic-snapshot flag --- parity/cli.rs | 3 +++ parity/configuration.rs | 2 ++ parity/run.rs | 20 +++++++++++++------- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/parity/cli.rs b/parity/cli.rs index 8f33489dc..91b9c1620 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -243,6 +243,8 @@ Snapshot Options: index, hash, or 'latest'. Note that taking snapshots at non-recent blocks will only work with --pruning archive [default: latest] + --no-periodic-snapshot Disable automated snapshots which usually occur once + every 10000 blocks. Virtual Machine Options: --jitvm Enable the JIT VM. @@ -382,6 +384,7 @@ pub struct Args { pub flag_from: String, pub flag_to: String, pub flag_at: String, + pub flag_no_periodic_snapshot: bool, pub flag_format: Option, pub flag_jitvm: bool, pub flag_log_file: Option, diff --git a/parity/configuration.rs b/parity/configuration.rs index f2fd34853..51d637580 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -226,6 +226,7 @@ impl Configuration { ui: self.args.cmd_ui, name: self.args.flag_identity, custom_bootnodes: self.args.flag_bootnodes.is_some(), + no_periodic_snapshot: self.args.flag_no_periodic_snapshot, }; Cmd::Run(run_cmd) }; @@ -802,6 +803,7 @@ mod tests { ui: false, name: "".into(), custom_bootnodes: false, + no_periodic_snapshot: false, })); } diff --git a/parity/run.rs b/parity/run.rs index 2d619d350..d384a3a7c 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -47,7 +47,10 @@ use rpc_apis; use rpc; use url; +// how often to take periodic snapshots. const SNAPSHOT_PERIOD: u64 = 10000; + +// how many blocks to wait before starting a periodic snapshot. 
const SNAPSHOT_HISTORY: u64 = 1000; #[derive(Debug, PartialEq)] @@ -81,6 +84,7 @@ pub struct RunCmd { pub ui: bool, pub name: String, pub custom_bootnodes: bool, + pub no_periodic_snapshot: bool, } pub fn execute(cmd: RunCmd) -> Result<(), String> { @@ -253,14 +257,16 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { }); service.register_io_handler(io_handler).expect("Error registering IO handler"); - let watcher = snapshot::Watcher::new( - service.client(), - service.io().channel(), - SNAPSHOT_PERIOD, - SNAPSHOT_HISTORY, - ); + if !cmd.no_periodic_snapshot { + let watcher = snapshot::Watcher::new( + service.client(), + service.io().channel(), + SNAPSHOT_PERIOD, + SNAPSHOT_HISTORY, + ); - service.add_notify(Arc::new(watcher)); + service.add_notify(Arc::new(watcher)); + } // start ui if cmd.ui { From 2bf235e226a6431bf38a4e270d454527405f30f3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 5 Sep 2016 12:17:21 +0200 Subject: [PATCH 06/17] use more mocking in tests --- ethcore/src/client/chain_notify.rs | 3 +- ethcore/src/snapshot/watcher.rs | 81 +++++++++++++++--------------- 2 files changed, 41 insertions(+), 43 deletions(-) diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index 737cd0153..0c34382a0 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -20,8 +20,7 @@ use util::H256; /// Represents what has to be handled by actor listening to chain events #[derive(Ipc)] pub trait ChainNotify : Send + Sync { - /// fires when chain has new blocks, not including those encountered during - /// a major sync. + /// fires when chain has new blocks. fn new_blocks(&self, _imported: Vec, _invalid: Vec, diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index d2dd1d7dc..5bf157312 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -37,11 +37,29 @@ impl HashToNumber for Client { } } +// helper trait for broadcasting a block to take a snapshot at. +trait Broadcast: Send + Sync { + fn take_at(&self, num: Option); +} + +impl Broadcast for IoChannel { + fn take_at(&self, num: Option) { + let num = match num { + Some(n) => n, + None => return, + }; + + if let Err(e) = self.send(ClientIoMessage::TakeSnapshot(num)) { + warn!("Snapshot watcher disconnected from IoService: {}", e); + } + } +} + /// A `ChainNotify` implementation which will trigger a snapshot event /// at certain block numbers. 
pub struct Watcher { oracle: Arc, - channel: IoChannel, + broadcast: Box, period: u64, history: u64, } @@ -53,7 +71,7 @@ impl Watcher { pub fn new(client: Arc, channel: IoChannel, period: u64, history: u64) -> Self { Watcher { oracle: client, - channel: channel, + broadcast: Box::new(channel), period: period, history: history, } @@ -70,7 +88,6 @@ impl ChainNotify for Watcher { _: Vec, _duration: u64) { - let highest = imported.into_iter() .filter_map(|h| self.oracle.to_number(h)) .filter(|&num| num >= self.period + self.history) @@ -78,23 +95,20 @@ impl ChainNotify for Watcher { .filter(|num| num % self.period == 0) .fold(0, ::std::cmp::max); - if highest != 0 { - if let Err(e) = self.channel.send(ClientIoMessage::TakeSnapshot(highest)) { - warn!("Snapshot watcher disconnected from IoService: {}", e); - } + match highest { + 0 => self.broadcast.take_at(None), + _ => self.broadcast.take_at(Some(highest)), } } } #[cfg(test)] mod tests { - use super::{HashToNumber, Watcher}; + use super::{Broadcast, HashToNumber, Watcher}; use client::ChainNotify; - use service::ClientIoMessage; - use util::{H256, U256, Mutex}; - use io::{IoContext, IoHandler, IoService}; + use util::{H256, U256}; use std::collections::HashMap; use std::sync::Arc; @@ -107,30 +121,23 @@ mod tests { } } - struct Handler(Arc>>); - - impl IoHandler for Handler { - fn message(&self, _context: &IoContext, message: &ClientIoMessage) { - match *message { - ClientIoMessage::TakeSnapshot(num) => self.0.lock().push(num), - _ => {} + struct TestBroadcast(Option); + impl Broadcast for TestBroadcast { + fn take_at(&self, num: Option) { + if num != self.0 { + panic!("Watcher broadcast wrong number. Expected {:?}, found {:?}", self.0, num); } } } - // helper harness for tests. - fn harness(numbers: Vec, period: u64, history: u64) -> Vec { - let events = Arc::new(Mutex::new(Vec::new())); - - let service = IoService::start().unwrap(); - service.register_handler(Arc::new(Handler(events.clone()))).unwrap(); - + // helper harness for tests which expect a notification. + fn harness(numbers: Vec, period: u64, history: u64, expected: Option) { let hashes: Vec<_> = numbers.clone().into_iter().map(|x| H256::from(U256::from(x))).collect(); - let mut map = hashes.clone().into_iter().zip(numbers).collect(); + let map = hashes.clone().into_iter().zip(numbers).collect(); let watcher = Watcher { oracle: Arc::new(TestOracle(map)), - channel: service.channel(), + broadcast: Box::new(TestBroadcast(expected)), period: period, history: history, }; @@ -143,35 +150,27 @@ mod tests { vec![], 0, ); - - drop(service); - - // binding necessary for compilation. 
- let v = events.lock().clone(); - v } + // helper + #[test] fn should_not_fire() { - let events = harness(vec![0], 5, 0); - assert_eq!(events, vec![]); + harness(vec![0], 5, 0, None); } #[test] fn fires_once_for_two() { - let events = harness(vec![14, 15], 10, 5); - assert_eq!(events, vec![10]); + harness(vec![14, 15], 10, 5, Some(10)); } #[test] fn finds_highest() { - let events = harness(vec![15, 25], 10, 5); - assert_eq!(events, vec![20]); + harness(vec![15, 25], 10, 5, Some(20)); } #[test] fn doesnt_fire_before_history() { - let events = harness(vec![10, 11], 10, 5); - assert_eq!(events, vec![]); + harness(vec![10, 11], 10, 5, None); } } \ No newline at end of file From f0ef5e6943f90765a07a810263d79f37ebe3946a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 5 Sep 2016 14:25:56 +0200 Subject: [PATCH 07/17] keep snapshot watcher alive --- ethcore/src/snapshot/service.rs | 7 +++++++ ethcore/src/snapshot/watcher.rs | 4 ++++ parity/run.rs | 16 ++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 449ced23f..ea5ec1a0a 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -275,6 +275,13 @@ impl Service { } } + // delete the temporary snapshot dir if it does exist. + if let Err(e) = fs::remove_dir_all(service.temp_snapshot_dir()) { + if e.kind() != ErrorKind { + return Err(e.into()) + } + } + Ok(service) } diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index 5bf157312..5a0c3eafc 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -49,6 +49,8 @@ impl Broadcast for IoChannel { None => return, }; + trace!(target: "snapshot_watcher", "broadcast: {}", num); + if let Err(e) = self.send(ClientIoMessage::TakeSnapshot(num)) { warn!("Snapshot watcher disconnected from IoService: {}", e); } @@ -88,6 +90,8 @@ impl ChainNotify for Watcher { _: Vec, _duration: u64) { + trace!(target: "snapshot_watcher", "{} imported", imported.len()); + let highest = imported.into_iter() .filter_map(|h| self.oracle.to_number(h)) .filter(|&num| num >= self.period + self.history) diff --git a/parity/run.rs b/parity/run.rs index d384a3a7c..618a9f0db 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -257,6 +257,22 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { }); service.register_io_handler(io_handler).expect("Error registering IO handler"); + // the watcher must be kept alive. 
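The binding below matters because of how the notification registration presumably works: if the client keeps only a weak handle to registered ChainNotify listeners (which is what the "must be kept alive" comment implies), then dropping the last strong Arc silently unregisters the watcher, so it has to live in a named local for the whole run. A small sketch of that behaviour with plain Arc/Weak (register is a stand-in, not the real add_notify API):

    use std::sync::{Arc, Weak};

    // Stand-in for add_notify: the service keeps only a weak handle to the listener.
    fn register(listener: &Arc<u32>) -> Weak<u32> {
        Arc::downgrade(listener)
    }

    fn main() {
        // Held: the strong Arc stays in a binding, so the registration stays live.
        let watcher = Arc::new(1u32);
        let registration = register(&watcher);
        let _watcher = watcher;
        assert!(registration.upgrade().is_some());

        // Not held: no strong reference survives the statement, so the weak handle is dead.
        let registration = register(&Arc::new(2u32));
        assert!(registration.upgrade().is_none());
    }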
+ let _watcher = match cmd.no_periodic_snapshot { + true => None, + false => { + let watcher = Arc::new(snapshot::Watcher::new( + service.client(), + service.io().channel(), + SNAPSHOT_PERIOD, + SNAPSHOT_HISTORY, + )); + + service.add_notify(watcher.clone()); + Some(watcher) + }, + }; + if !cmd.no_periodic_snapshot { let watcher = snapshot::Watcher::new( service.client(), From aa8b871e490f8e1eec7e20e9a00b125fe01bd27f Mon Sep 17 00:00:00 2001 From: debris Date: Mon, 5 Sep 2016 17:41:34 +0200 Subject: [PATCH 08/17] handling invalid spec jsons properly, additional tests, closes #1840 --- ethcore/src/engines/basic_authority.rs | 5 ++++- ethcore/src/engines/instant_seal.rs | 5 ++++- ethcore/src/ethereum/mod.rs | 20 ++++++++++++-------- ethcore/src/spec/spec.rs | 21 +++++++++++++++------ parity/params.rs | 7 +++++-- util/src/misc.rs | 11 +---------- 6 files changed, 41 insertions(+), 28 deletions(-) diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index eef3df6b1..18dfeec46 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -187,7 +187,10 @@ mod tests { use spec::Spec; /// Create a new test chain spec with `BasicAuthority` consensus engine. - fn new_test_authority() -> Spec { Spec::load(include_bytes!("../../res/test_authority.json")) } + fn new_test_authority() -> Spec { + let bytes: &[u8] = include_bytes!("../../res/test_authority.json"); + Spec::load(bytes).expect("invalid chain spec") + } #[test] fn has_valid_metadata() { diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index bdb882ee7..3c95f3465 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -72,7 +72,10 @@ mod tests { use block::*; /// Create a new test chain spec with `BasicAuthority` consensus engine. - fn new_test_instant() -> Spec { Spec::load(include_bytes!("../../res/instant_seal.json")) } + fn new_test_instant() -> Spec { + let bytes: &[u8] = include_bytes!("../../res/instant_seal.json"); + Spec::load(bytes).expect("invalid chain spec") + } #[test] fn instant_can_seal() { diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 1efe001e5..6d46d5551 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -29,29 +29,33 @@ pub use self::denominations::*; use super::spec::*; +fn load(b: &[u8]) -> Spec { + Spec::load(b).expect("chain spec is invalid") +} + /// Create a new Olympic chain spec. -pub fn new_olympic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/olympic.json")) } +pub fn new_olympic() -> Spec { load(include_bytes!("../../res/ethereum/olympic.json")) } /// Create a new Frontier mainnet chain spec. -pub fn new_frontier() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier.json")) } +pub fn new_frontier() -> Spec { load(include_bytes!("../../res/ethereum/frontier.json")) } /// Create a new Frontier mainnet chain spec without the DAO hardfork. -pub fn new_classic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/classic.json")) } +pub fn new_classic() -> Spec { load(include_bytes!("../../res/ethereum/classic.json")) } /// Create a new Frontier chain spec as though it never changes to Homestead. -pub fn new_frontier_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier_test.json")) } +pub fn new_frontier_test() -> Spec { load(include_bytes!("../../res/ethereum/frontier_test.json")) } /// Create a new Homestead chain spec as though it never changed from Frontier. 
-pub fn new_homestead_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/homestead_test.json")) } +pub fn new_homestead_test() -> Spec { load(include_bytes!("../../res/ethereum/homestead_test.json")) } /// Create a new Frontier/Homestead/DAO chain spec with transition points at #5 and #8. -pub fn new_daohardfork_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/daohardfork_test.json")) } +pub fn new_daohardfork_test() -> Spec { load(include_bytes!("../../res/ethereum/daohardfork_test.json")) } /// Create a new Frontier main net chain spec without genesis accounts. -pub fn new_mainnet_like() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier_like_test.json")) } +pub fn new_mainnet_like() -> Spec { load(include_bytes!("../../res/ethereum/frontier_like_test.json")) } /// Create a new Morden chain spec. -pub fn new_morden() -> Spec { Spec::load(include_bytes!("../../res/ethereum/morden.json")) } +pub fn new_morden() -> Spec { load(include_bytes!("../../res/ethereum/morden.json")) } #[cfg(test)] mod tests { diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index d80ac0e33..58317e97b 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -244,18 +244,21 @@ impl Spec { } /// Loads spec from json file. - pub fn load(reader: &[u8]) -> Self { - From::from(ethjson::spec::Spec::load(reader).expect("invalid json file")) + pub fn load(reader: R) -> Result where R: Read { + match ethjson::spec::Spec::load(reader) { + Ok(spec) => Ok(spec.into()), + _ => Err("Spec json is invalid".into()), + } } /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus. - pub fn new_test() -> Spec { - Spec::load(include_bytes!("../../res/null_morden.json")) + pub fn new_test() -> Self { + Spec::load(include_bytes!("../../res/null_morden.json") as &[u8]).expect("null_morden.json is invalid") } /// Create a new Spec which is a NullEngine consensus with a premine of address whose secret is sha3(''). - pub fn new_null() -> Spec { - Spec::load(include_bytes!("../../res/null.json")) + pub fn new_null() -> Self { + Spec::load(include_bytes!("../../res/null.json") as &[u8]).expect("null.json is invalid") } } @@ -267,6 +270,12 @@ mod tests { use views::*; use super::*; + // https://github.com/ethcore/parity/issues/1840 + #[test] + fn test_load_empty() { + assert!(Spec::load(&vec![] as &[u8]).is_err()); + } + #[test] fn test_chain() { let test_spec = Spec::new_test(); diff --git a/parity/params.rs b/parity/params.rs index 54a680414..c67520aa1 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -17,7 +17,7 @@ use std::str::FromStr; use std::fs; use std::time::Duration; -use util::{contents, H256, Address, U256, version_data}; +use util::{H256, Address, U256, version_data}; use util::journaldb::Algorithm; use ethcore::spec::Spec; use ethcore::ethereum; @@ -61,7 +61,10 @@ impl SpecType { SpecType::Testnet => Ok(ethereum::new_morden()), SpecType::Olympic => Ok(ethereum::new_olympic()), SpecType::Classic => Ok(ethereum::new_classic()), - SpecType::Custom(ref file) => Ok(Spec::load(&try!(contents(file).map_err(|_| "Could not load specification file.")))) + SpecType::Custom(ref filename) => { + let file = try!(fs::File::open(filename).map_err(|_| "Could not load specification file.")); + Spec::load(file) + } } } } diff --git a/util/src/misc.rs b/util/src/misc.rs index 62e8542db..50b2e7e8d 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -16,7 +16,6 @@ //! Diff misc. 
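With this change Spec::load accepts any Read implementation and reports failure instead of panicking, so both a malformed user-supplied file and a broken bundled spec become ordinary errors. A short sketch of the two call-site shapes (the error type is presumably String, judging by the "Spec json is invalid".into() above, and the include path is illustrative):

    use std::fs;
    use ethcore::spec::Spec;

    // User-supplied chain spec: a missing file and invalid JSON both surface as an
    // error string the caller can report, instead of the old panic inside Spec::load.
    fn custom_spec(path: &str) -> Result<Spec, String> {
        let file = fs::File::open(path)
            .map_err(|_| "Could not load specification file.".to_owned())?;
        Spec::load(file)
    }

    // Bundled chain spec: a broken bundle is a packaging bug, so unwrapping is reasonable.
    fn bundled_spec() -> Spec {
        let bytes: &[u8] = include_bytes!("../../res/null_morden.json");
        Spec::load(bytes).expect("bundled chain spec is valid")
    }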
-use std::fs::File; use common::*; use rlp::{Stream, RlpStream}; use target_info::Target; @@ -33,14 +32,6 @@ pub enum Filth { Dirty, } -/// Read the whole contents of a file `name`. -pub fn contents(name: &str) -> Result { - let mut file = try!(File::open(name)); - let mut ret: Vec = Vec::new(); - try!(file.read_to_end(&mut ret)); - Ok(ret) -} - /// Get the standard version string for this software. pub fn version() -> String { let sha3 = short_sha(); @@ -64,4 +55,4 @@ pub fn version_data() -> Bytes { s.append(&rustc_version()); s.append(&&Target::os()[0..2]); s.out() -} \ No newline at end of file +} From 09bc675e6a594e04b0761250716c5c26a30586ea Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 5 Sep 2016 22:59:34 +0200 Subject: [PATCH 09/17] address grumbles --- parity/cli.rs | 2 +- parity/run.rs | 11 ----------- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/parity/cli.rs b/parity/cli.rs index 91b9c1620..bb46bda13 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -384,7 +384,7 @@ pub struct Args { pub flag_from: String, pub flag_to: String, pub flag_at: String, - pub flag_no_periodic_snapshot: bool, + pub flag_no_periodic_snapshot: bool, pub flag_format: Option, pub flag_jitvm: bool, pub flag_log_file: Option, diff --git a/parity/run.rs b/parity/run.rs index 618a9f0db..7e81974c8 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -273,17 +273,6 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { }, }; - if !cmd.no_periodic_snapshot { - let watcher = snapshot::Watcher::new( - service.client(), - service.io().channel(), - SNAPSHOT_PERIOD, - SNAPSHOT_HISTORY, - ); - - service.add_notify(Arc::new(watcher)); - } - // start ui if cmd.ui { if !cmd.dapps_conf.enabled { From 5c5d9c8ccdd4ba84cee53aadf6512a1cb1ea5d94 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 6 Sep 2016 15:31:13 +0200 Subject: [PATCH 10/17] Snapshot sync (#2047) * PV64 sync * Tests * Client DB restore * Snapshot restoration over IPC * Upating test * Minor tweaks * Upating test --- ethcore/build.rs | 1 + ethcore/src/block_queue.rs | 2 +- ethcore/src/blockchain/config.rs | 2 +- ethcore/src/client/client.rs | 199 +++++---- ethcore/src/client/config.rs | 2 +- ethcore/src/miner/miner.rs | 5 + ethcore/src/service.rs | 21 +- ethcore/src/snapshot/mod.rs | 64 +-- ethcore/src/snapshot/service.rs | 123 ++---- .../src/snapshot/snapshot_service_trait.rs | 54 +++ ethcore/src/types/mod.rs.in | 2 + ethcore/src/types/restoration_status.rs | 34 ++ ethcore/src/types/snapshot_manifest.rs | 70 ++++ ethcore/src/verification/mod.rs | 2 +- parity/main.rs | 22 +- parity/modules.rs | 7 +- parity/run.rs | 3 +- parity/snapshot.rs | 7 +- parity/sync.rs | 4 +- rpc/src/v1/impls/eth.rs | 3 +- rpc/src/v1/tests/helpers/sync_provider.rs | 2 + sync/src/api.rs | 27 +- sync/src/chain.rs | 387 +++++++++++++++--- sync/src/lib.rs | 36 +- sync/src/snapshot.rs | 200 +++++++++ sync/src/sync_io.rs | 20 +- sync/src/tests/helpers.rs | 29 +- sync/src/tests/mod.rs | 1 + sync/src/tests/snapshot.rs | 123 ++++++ util/network/src/host.rs | 6 + util/network/src/session.rs | 5 + util/src/kvdb.rs | 215 +++++++--- 32 files changed, 1258 insertions(+), 420 deletions(-) create mode 100644 ethcore/src/snapshot/snapshot_service_trait.rs create mode 100644 ethcore/src/types/restoration_status.rs create mode 100644 ethcore/src/types/snapshot_manifest.rs create mode 100644 sync/src/snapshot.rs create mode 100644 sync/src/tests/snapshot.rs diff --git a/ethcore/build.rs b/ethcore/build.rs index 2e07cbc2f..b83955708 100644 --- a/ethcore/build.rs +++ 
b/ethcore/build.rs @@ -19,5 +19,6 @@ extern crate ethcore_ipc_codegen; fn main() { ethcore_ipc_codegen::derive_binary("src/types/mod.rs.in").unwrap(); ethcore_ipc_codegen::derive_ipc("src/client/traits.rs").unwrap(); + ethcore_ipc_codegen::derive_ipc("src/snapshot/snapshot_service_trait.rs").unwrap(); ethcore_ipc_codegen::derive_ipc("src/client/chain_notify.rs").unwrap(); } diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 7d686cec0..c441136fd 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -37,7 +37,7 @@ const MIN_MEM_LIMIT: usize = 16384; const MIN_QUEUE_LIMIT: usize = 512; /// Block queue configuration -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct BlockQueueConfig { /// Maximum number of blocks to keep in unverified queue. /// When the limit is reached, is_full returns true. diff --git a/ethcore/src/blockchain/config.rs b/ethcore/src/blockchain/config.rs index 1a0ab9d42..324474958 100644 --- a/ethcore/src/blockchain/config.rs +++ b/ethcore/src/blockchain/config.rs @@ -17,7 +17,7 @@ //! Blockchain configuration. /// Blockchain configuration. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct Config { /// Preferred cache size in bytes. pub pref_cache_size: usize, diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 554afab38..dfb4f3ad3 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -32,7 +32,7 @@ use util::kvdb::*; // other use io::*; use views::{BlockView, HeaderView, BodyView}; -use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult}; +use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; use header::BlockNumber; use state::State; use spec::Spec; @@ -122,11 +122,13 @@ impl SleepState { /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. pub struct Client { mode: Mode, - chain: Arc, - tracedb: Arc>, + chain: RwLock>, + tracedb: RwLock>, engine: Arc, - db: Arc, - state_db: Mutex>, + config: ClientConfig, + db: RwLock>, + pruning: journaldb::Algorithm, + state_db: RwLock>, block_queue: BlockQueue, report: RwLock, import_lock: Mutex<()>, @@ -168,8 +170,8 @@ impl Client { db_config.wal = config.db_wal; let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database))); - let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone())); - let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone()))); + let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); + let tracedb = RwLock::new(try!(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()))); let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) { @@ -184,32 +186,34 @@ impl Client { let engine = spec.engine.clone(); - let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); + let block_queue = BlockQueue::new(config.queue.clone(), engine.clone(), message_channel.clone()); let panic_handler = PanicHandler::new_in_arc(); panic_handler.forward_from(&block_queue); let awake = match config.mode { Mode::Dark(..) 
=> false, _ => true }; let factories = Factories { - vm: EvmFactory::new(config.vm_type), - trie: TrieFactory::new(config.trie_spec), + vm: EvmFactory::new(config.vm_type.clone()), + trie: TrieFactory::new(config.trie_spec.clone()), accountdb: Default::default(), }; let client = Client { sleep_state: Mutex::new(SleepState::new(awake)), liveness: AtomicBool::new(awake), - mode: config.mode, - chain: chain, + mode: config.mode.clone(), + chain: RwLock::new(chain), tracedb: tracedb, engine: engine, - db: db, - state_db: Mutex::new(state_db), + pruning: config.pruning.clone(), + verifier: verification::new(config.verifier_type.clone()), + config: config, + db: RwLock::new(db), + state_db: RwLock::new(state_db), block_queue: block_queue, report: RwLock::new(Default::default()), import_lock: Mutex::new(()), panic_handler: panic_handler, - verifier: verification::new(config.verifier_type), miner: miner, io_channel: message_channel, notify: RwLock::new(Vec::new()), @@ -253,8 +257,9 @@ impl Client { let mut last_hashes = LastHashes::new(); last_hashes.resize(256, H256::default()); last_hashes[0] = parent_hash; + let chain = self.chain.read(); for i in 0..255 { - match self.chain.block_details(&last_hashes[i]) { + match chain.block_details(&last_hashes[i]) { Some(details) => { last_hashes[i + 1] = details.parent.clone(); }, @@ -270,22 +275,23 @@ impl Client { let engine = &*self.engine; let header = &block.header; + let chain = self.chain.read(); // Check the block isn't so old we won't be able to enact it. - let best_block_number = self.chain.best_block_number(); + let best_block_number = chain.best_block_number(); if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY { warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); return Err(()); } // Verify Block Family - let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, &*self.chain); + let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, &**chain); if let Err(e) = verify_family_result { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); }; // Check if Parent is in chain - let chain_has_parent = self.chain.block_header(header.parent_hash()); + let chain_has_parent = chain.block_header(header.parent_hash()); if let None = chain_has_parent { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash()); return Err(()); @@ -294,9 +300,9 @@ impl Client { // Enact Verified Block let parent = chain_has_parent.unwrap(); let last_hashes = self.build_last_hashes(header.parent_hash().clone()); - let db = self.state_db.lock().boxed_clone(); + let db = self.state_db.read().boxed_clone(); - let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); + let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); if let Err(e) = enact_result { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); @@ -408,17 +414,18 @@ impl Client { } } - self.db.flush().expect("DB flush failed."); + self.db.read().flush().expect("DB flush failed."); imported } fn commit_block(&self, block: B, hash: 
&H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain { let number = block.header().number(); let parent = block.header().parent_hash().clone(); + let chain = self.chain.read(); // Are we committing an era? let ancient = if number >= HISTORY { let n = number - HISTORY; - Some((n, self.chain.block_hash(n).unwrap())) + Some((n, chain.block_hash(n).unwrap())) } else { None }; @@ -432,14 +439,14 @@ impl Client { //let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); - let mut batch = DBTransaction::new(&self.db); + let mut batch = DBTransaction::new(&self.db.read()); // CHECK! I *think* this is fine, even if the state_root is equal to another // already-imported block of the same number. // TODO: Prove it with a test. block.drain().commit(&mut batch, number, hash, ancient).expect("DB commit failed."); - let route = self.chain.insert_block(&mut batch, block_data, receipts); - self.tracedb.import(&mut batch, TraceImportRequest { + let route = chain.insert_block(&mut batch, block_data, receipts); + self.tracedb.read().import(&mut batch, TraceImportRequest { traces: traces.into(), block_hash: hash.clone(), block_number: number, @@ -447,8 +454,8 @@ impl Client { retracted: route.retracted.len() }); // Final commit to the DB - self.db.write_buffered(batch); - self.chain.commit(); + self.db.read().write_buffered(batch); + chain.commit(); self.update_last_hashes(&parent, hash); route @@ -491,10 +498,10 @@ impl Client { }; self.block_header(id).and_then(|header| { - let db = self.state_db.lock().boxed_clone(); + let db = self.state_db.read().boxed_clone(); // early exit for pruned blocks - if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY { + if db.is_pruned() && self.chain.read().best_block_number() >= block_number + HISTORY { return None; } @@ -522,7 +529,7 @@ impl Client { /// Get a copy of the best block's state. pub fn state(&self) -> State { State::from_existing( - self.state_db.lock().boxed_clone(), + self.state_db.read().boxed_clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce(), self.factories.clone()) @@ -531,22 +538,22 @@ impl Client { /// Get info on the cache. pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { - self.chain.cache_size() + self.chain.read().cache_size() } /// Get the report. pub fn report(&self) -> ClientReport { let mut report = self.report.read().clone(); - report.state_db_mem = self.state_db.lock().mem_used(); + report.state_db_mem = self.state_db.read().mem_used(); report } /// Tick the client. // TODO: manage by real events. pub fn tick(&self) { - self.chain.collect_garbage(); + self.chain.read().collect_garbage(); self.block_queue.collect_garbage(); - self.tracedb.collect_garbage(); + self.tracedb.read().collect_garbage(); match self.mode { Mode::Dark(timeout) => { @@ -584,16 +591,16 @@ impl Client { pub fn block_number(&self, id: BlockID) -> Option { match id { BlockID::Number(number) => Some(number), - BlockID::Hash(ref hash) => self.chain.block_number(hash), + BlockID::Hash(ref hash) => self.chain.read().block_number(hash), BlockID::Earliest => Some(0), - BlockID::Latest | BlockID::Pending => Some(self.chain.best_block_number()), + BlockID::Latest | BlockID::Pending => Some(self.chain.read().best_block_number()), } } /// Take a snapshot at the given block. /// If the ID given is "latest", this will default to 1000 blocks behind. 
- pub fn take_snapshot(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), ::error::Error> { - let db = self.state_db.lock().boxed_clone(); + pub fn take_snapshot(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), EthcoreError> { + let db = self.state_db.read().boxed_clone(); let best_block_number = self.chain_info().best_block_number; let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))); @@ -618,7 +625,7 @@ impl Client { }, }; - try!(snapshot::take_snapshot(&self.chain, start_hash, db.as_hashdb(), writer, p)); + try!(snapshot::take_snapshot(&self.chain.read(), start_hash, db.as_hashdb(), writer, p)); Ok(()) } @@ -634,8 +641,8 @@ impl Client { fn transaction_address(&self, id: TransactionID) -> Option { match id { - TransactionID::Hash(ref hash) => self.chain.transaction_address(hash), - TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { + TransactionID::Hash(ref hash) => self.chain.read().transaction_address(hash), + TransactionID::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress { block_hash: hash, index: index, }) @@ -666,6 +673,25 @@ impl Client { } } +impl snapshot::DatabaseRestore for Client { + /// Restart the client with a new backend + fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> { + let _import_lock = self.import_lock.lock(); + let mut state_db = self.state_db.write(); + let mut chain = self.chain.write(); + let mut tracedb = self.tracedb.write(); + self.miner.clear(); + let db = self.db.write(); + try!(db.restore(new_db)); + + *state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE); + *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone())); + *tracedb = try!(TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()).map_err(ClientError::from)); + Ok(()) + } +} + + impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result { let header = try!(self.block_header(block).ok_or(CallError::StatePruned)); @@ -749,15 +775,17 @@ impl BlockChainClient for Client { } fn best_block_header(&self) -> Bytes { - self.chain.best_block_header() + self.chain.read().best_block_header() } fn block_header(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_header_data(&hash)) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash)) } fn block_body(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_body(&hash)) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash)) } fn block(&self, id: BlockID) -> Option { @@ -766,14 +794,16 @@ impl BlockChainClient for Client { return Some(block.rlp_bytes(Seal::Without)); } } - Self::block_hash(&self.chain, id).and_then(|hash| { - self.chain.block(&hash) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| { + chain.block(&hash) }) } fn block_status(&self, id: BlockID) -> BlockStatus { - match Self::block_hash(&self.chain, id) { - Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, + let chain = self.chain.read(); + match Self::block_hash(&chain, id) { + Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, Some(hash) => self.block_queue.block_status(&hash), None => BlockStatus::Unknown } @@ -785,7 +815,8 @@ 
impl BlockChainClient for Client { return Some(*block.header.difficulty() + self.block_total_difficulty(BlockID::Latest).expect("blocks in chain have details; qed")); } } - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) + let chain = self.chain.read(); + Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) } fn nonce(&self, address: &Address, id: BlockID) -> Option { @@ -793,7 +824,8 @@ impl BlockChainClient for Client { } fn block_hash(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id) + let chain = self.chain.read(); + Self::block_hash(&chain, id) } fn code(&self, address: &Address, id: BlockID) -> Option> { @@ -809,7 +841,7 @@ impl BlockChainClient for Client { } fn transaction(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| self.chain.transaction(&address)) + self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address)) } fn uncle(&self, id: UncleID) -> Option { @@ -818,11 +850,12 @@ impl BlockChainClient for Client { } fn transaction_receipt(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| self.chain.block_number(&address.block_hash).and_then(|block_number| { - let t = self.chain.block_body(&address.block_hash) + let chain = self.chain.read(); + self.transaction_address(id).and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| { + let t = chain.block_body(&address.block_hash) .and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index)); - match (t, self.chain.transaction_receipt(&address)) { + match (t, chain.transaction_receipt(&address)) { (Some(tx), Some(receipt)) => { let block_hash = tx.block_hash.clone(); let block_number = tx.block_number.clone(); @@ -832,7 +865,7 @@ impl BlockChainClient for Client { 0 => U256::zero(), i => { let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 }; - let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); + let prior_receipt = chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); prior_receipt.gas_used } }; @@ -863,28 +896,29 @@ impl BlockChainClient for Client { } fn tree_route(&self, from: &H256, to: &H256) -> Option { - match self.chain.is_known(from) && self.chain.is_known(to) { - true => Some(self.chain.tree_route(from.clone(), to.clone())), + let chain = self.chain.read(); + match chain.is_known(from) && chain.is_known(to) { + true => Some(chain.tree_route(from.clone(), to.clone())), false => None } } fn find_uncles(&self, hash: &H256) -> Option> { - self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age()) + self.chain.read().find_uncle_hashes(hash, self.engine.maximum_uncle_age()) } fn state_data(&self, hash: &H256) -> Option { - self.state_db.lock().state(hash) + self.state_db.read().state(hash) } fn block_receipts(&self, hash: &H256) -> Option { - self.chain.block_receipts(hash).map(|receipts| ::rlp::encode(&receipts).to_vec()) + self.chain.read().block_receipts(hash).map(|receipts| ::rlp::encode(&receipts).to_vec()) } fn import_block(&self, bytes: Bytes) -> Result { { let header = BlockView::new(&bytes).header_view(); - if self.chain.is_known(&header.sha3()) { + if 
self.chain.read().is_known(&header.sha3()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { @@ -903,12 +937,13 @@ impl BlockChainClient for Client { } fn chain_info(&self) -> BlockChainInfo { + let chain = self.chain.read(); BlockChainInfo { - total_difficulty: self.chain.best_block_total_difficulty(), - pending_total_difficulty: self.chain.best_block_total_difficulty(), - genesis_hash: self.chain.genesis_hash(), - best_block_hash: self.chain.best_block_hash(), - best_block_number: From::from(self.chain.best_block_number()) + total_difficulty: chain.best_block_total_difficulty(), + pending_total_difficulty: chain.best_block_total_difficulty(), + genesis_hash: chain.genesis_hash(), + best_block_hash: chain.best_block_hash(), + best_block_number: From::from(chain.best_block_number()) } } @@ -918,7 +953,7 @@ impl BlockChainClient for Client { fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option> { match (self.block_number(from_block), self.block_number(to_block)) { - (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)), + (Some(from), Some(to)) => Some(self.chain.read().blocks_with_bloom(bloom, from, to)), _ => None } } @@ -936,10 +971,11 @@ impl BlockChainClient for Client { blocks.sort(); + let chain = self.chain.read(); blocks.into_iter() - .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) + .filter_map(|number| chain.block_hash(number).map(|hash| (number, hash))) + .filter_map(|(number, hash)| chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) .flat_map(|(number, hash, receipts, hashes)| { let mut log_index = 0; receipts.into_iter() @@ -975,7 +1011,7 @@ impl BlockChainClient for Client { to_address: From::from(filter.to_address), }; - let traces = self.tracedb.filter(&filter); + let traces = self.tracedb.read().filter(&filter); Some(traces) } else { None @@ -987,7 +1023,7 @@ impl BlockChainClient for Client { self.transaction_address(trace.transaction) .and_then(|tx_address| { self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) + .and_then(|number| self.tracedb.read().trace(number, tx_address.index, trace_address)) }) } @@ -995,17 +1031,17 @@ impl BlockChainClient for Client { self.transaction_address(transaction) .and_then(|tx_address| { self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) + .and_then(|number| self.tracedb.read().transaction_traces(number, tx_address.index)) }) } fn block_traces(&self, block: BlockID) -> Option> { self.block_number(block) - .and_then(|number| self.tracedb.block_traces(number)) + .and_then(|number| self.tracedb.read().block_traces(number)) } fn last_hashes(&self) -> LastHashes { - (*self.build_last_hashes(self.chain.best_block_hash())).clone() + (*self.build_last_hashes(self.chain.read().best_block_hash())).clone() } fn queue_transactions(&self, transactions: Vec) { @@ -1032,14 +1068,15 @@ impl 
BlockChainClient for Client { impl MiningBlockChainClient for Client { fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { let engine = &*self.engine; - let h = self.chain.best_block_hash(); + let chain = self.chain.read(); + let h = chain.best_block_hash(); let mut open_block = OpenBlock::new( engine, self.factories.clone(), false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. - self.state_db.lock().boxed_clone(), - &self.chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"), + self.state_db.read().boxed_clone(), + &chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"), self.build_last_hashes(h.clone()), author, gas_range_target, @@ -1047,7 +1084,7 @@ impl MiningBlockChainClient for Client { ).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed"); // Add uncles - self.chain + chain .find_uncle_headers(&h, engine.maximum_uncle_age()) .unwrap() .into_iter() @@ -1088,7 +1125,7 @@ impl MiningBlockChainClient for Client { precise_time_ns() - start, ); }); - self.db.flush().expect("DB flush failed."); + self.db.read().flush().expect("DB flush failed."); Ok(h) } } diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 504ca4de7..bb70de6cd 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -62,7 +62,7 @@ impl FromStr for DatabaseCompactionProfile { } /// Operating mode for the client. -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq, Clone)] pub enum Mode { /// Always on. Active, diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index a2533ecde..c9d60f075 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -227,6 +227,11 @@ impl Miner { self.options.force_sealing || !self.options.new_work_notify.is_empty() } + /// Clear all pending block states + pub fn clear(&self) { + self.sealing_work.lock().queue.reset(); + } + /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. 
pub fn pending_state(&self) -> Option { self.sealing_work.lock().queue.peek_last_ref().map(|b| b.block().fields().state.clone()) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index e2e4772a4..4c26e2e44 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -78,7 +78,7 @@ impl ClientService { let pruning = config.pruning; let client = try!(Client::new(config, &spec, db_path, miner, io_service.channel())); - let snapshot = try!(SnapshotService::new(spec, pruning, db_path.into(), io_service.channel())); + let snapshot = try!(SnapshotService::new(spec, pruning, db_path.into(), io_service.channel(), client.clone())); let snapshot = Arc::new(snapshot); @@ -90,7 +90,7 @@ impl ClientService { try!(io_service.register_handler(client_io)); let stop_guard = ::devtools::StopGuard::new(); - run_ipc(ipc_path, client.clone(), stop_guard.share()); + run_ipc(ipc_path, client.clone(), snapshot.clone(), stop_guard.share()); Ok(ClientService { io_service: Arc::new(io_service), @@ -176,14 +176,27 @@ impl IoHandler for ClientIoHandler { } #[cfg(feature="ipc")] -fn run_ipc(base_path: &Path, client: Arc, stop: Arc) { +fn run_ipc(base_path: &Path, client: Arc, snapshot_service: Arc, stop: Arc) { let mut path = base_path.to_owned(); path.push("parity-chain.ipc"); let socket_addr = format!("ipc://{}", path.to_string_lossy()); + let s = stop.clone(); ::std::thread::spawn(move || { let mut worker = nanoipc::Worker::new(&(client as Arc)); worker.add_reqrep(&socket_addr).expect("Ipc expected to initialize with no issues"); + while !s.load(::std::sync::atomic::Ordering::Relaxed) { + worker.poll(); + } + }); + + let mut path = base_path.to_owned(); + path.push("parity-snapshot.ipc"); + let socket_addr = format!("ipc://{}", path.to_string_lossy()); + ::std::thread::spawn(move || { + let mut worker = nanoipc::Worker::new(&(snapshot_service as Arc<::snapshot::SnapshotService>)); + worker.add_reqrep(&socket_addr).expect("Ipc expected to initialize with no issues"); + while !stop.load(::std::sync::atomic::Ordering::Relaxed) { worker.poll(); } @@ -191,7 +204,7 @@ fn run_ipc(base_path: &Path, client: Arc, stop: Arc) { } #[cfg(not(feature="ipc"))] -fn run_ipc(_base_path: &Path, _client: Arc, _stop: Arc) { +fn run_ipc(_base_path: &Path, _client: Arc, _snapshot_service: Arc, _stop: Arc) { } #[cfg(test)] diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 2a81b967d..ab23be9b3 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -32,9 +32,9 @@ use util::Mutex; use util::hash::{FixedHash, H256}; use util::journaldb::{self, Algorithm, JournalDB}; use util::kvdb::Database; -use util::sha3::SHA3_NULL_RLP; use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut}; -use rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; +use util::sha3::SHA3_NULL_RLP; +use rlp::{RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; use self::account::Account; use self::block::AbridgedBlock; @@ -44,7 +44,10 @@ use crossbeam::{scope, ScopedJoinHandle}; use rand::{Rng, OsRng}; pub use self::error::Error; -pub use self::service::{RestorationStatus, Service, SnapshotService}; +pub use self::service::{Service, DatabaseRestore}; +pub use self::traits::{SnapshotService, RemoteSnapshotService}; +pub use types::snapshot_manifest::ManifestData; +pub use types::restoration_status::RestorationStatus; pub mod io; pub mod service; @@ -56,6 +59,11 @@ mod error; #[cfg(test)] mod tests; +mod traits { + #![allow(dead_code, unused_assignments, unused_variables, 
missing_docs)] // codegen issues + include!(concat!(env!("OUT_DIR"), "/snapshot_service_trait.rs")); +} + // Try to have chunks be around 4MB (before compression) const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; @@ -354,54 +362,6 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex, - /// List of block chunk hashes. - pub block_hashes: Vec, - /// The final, expected state root. - pub state_root: H256, - /// Block number this snapshot was taken at. - pub block_number: u64, - /// Block hash this snapshot was taken at. - pub block_hash: H256, -} - -impl ManifestData { - /// Encode the manifest data to rlp. - pub fn into_rlp(self) -> Bytes { - let mut stream = RlpStream::new_list(5); - stream.append(&self.state_hashes); - stream.append(&self.block_hashes); - stream.append(&self.state_root); - stream.append(&self.block_number); - stream.append(&self.block_hash); - - stream.out() - } - - /// Try to restore manifest data from raw bytes, interpreted as RLP. - pub fn from_rlp(raw: &[u8]) -> Result { - let decoder = UntrustedRlp::new(raw); - - let state_hashes: Vec = try!(decoder.val_at(0)); - let block_hashes: Vec = try!(decoder.val_at(1)); - let state_root: H256 = try!(decoder.val_at(2)); - let block_number: u64 = try!(decoder.val_at(3)); - let block_hash: H256 = try!(decoder.val_at(4)); - - Ok(ManifestData { - state_hashes: state_hashes, - block_hashes: block_hashes, - state_root: state_root, - block_number: block_number, - block_hash: block_hash, - }) - } -} - /// Used to rebuild the state trie piece by piece. pub struct StateRebuilder { db: Box, @@ -653,4 +613,4 @@ impl BlockRebuilder { } } } -} \ No newline at end of file +} diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 40f629ad9..f91821b22 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -23,7 +23,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; -use super::{ManifestData, StateRebuilder, BlockRebuilder}; +use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, SnapshotService}; use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::BlockChain; @@ -39,51 +39,10 @@ use util::journaldb::Algorithm; use util::kvdb::{Database, DatabaseConfig}; use util::snappy; -/// Statuses for restorations. -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum RestorationStatus { - /// No restoration. - Inactive, - /// Ongoing restoration. - Ongoing, - /// Failed restoration. - Failed, -} - -/// The interface for a snapshot network service. -/// This handles: -/// - restoration of snapshots to temporary databases. -/// - responding to queries for snapshot manifests and chunks -pub trait SnapshotService { - /// Query the most recent manifest data. - fn manifest(&self) -> Option; - - /// Get raw chunk for a given hash. - fn chunk(&self, hash: H256) -> Option; - - /// Ask the snapshot service for the restoration status. - fn status(&self) -> RestorationStatus; - - /// Ask the snapshot service for the number of chunks completed. - /// Return a tuple of (state_chunks, block_chunks). - /// Undefined when not restoring. - fn chunks_done(&self) -> (usize, usize); - - /// Begin snapshot restoration. - /// If restoration in-progress, this will reset it. - /// From this point on, any previous snapshot may become unavailable. - fn begin_restore(&self, manifest: ManifestData); - - /// Abort an in-progress restoration if there is one. 
- fn abort_restore(&self); - - /// Feed a raw state chunk to the service to be processed asynchronously. - /// no-op if not currently restoring. - fn restore_state_chunk(&self, hash: H256, chunk: Bytes); - - /// Feed a raw block chunk to the service to be processed asynchronously. - /// no-op if currently restoring. - fn restore_block_chunk(&self, hash: H256, chunk: Bytes); +/// External database restoration handler +pub trait DatabaseRestore : Send + Sync { + /// Restart with a new backend. Takes ownership of passed database and moves it to a new location. + fn restore_db(&self, new_db: &str) -> Result<(), Error>; } /// State restoration manager. @@ -208,11 +167,12 @@ pub struct Service { genesis_block: Bytes, state_chunks: AtomicUsize, block_chunks: AtomicUsize, + db_restore: Arc, } impl Service { /// Create a new snapshot service. - pub fn new(spec: &Spec, pruning: Algorithm, client_db: PathBuf, io_channel: Channel) -> Result { + pub fn new(spec: &Spec, pruning: Algorithm, client_db: PathBuf, io_channel: Channel, db_restore: Arc) -> Result { let db_path = try!(client_db.parent().and_then(Path::parent) .ok_or_else(|| UtilError::SimpleString("Failed to find database root.".into()))).to_owned(); @@ -236,6 +196,7 @@ impl Service { genesis_block: spec.genesis_block(), state_chunks: AtomicUsize::new(0), block_chunks: AtomicUsize::new(0), + db_restore: db_restore, }; // create the root snapshot dir if it doesn't exist. @@ -295,37 +256,8 @@ impl Service { let our_db = self.restoration_db(); trace!(target: "snapshot", "replacing {:?} with {:?}", self.client_db, our_db); - - let mut backup_db = self.restoration_dir(); - backup_db.push("backup_db"); - - let _ = fs::remove_dir_all(&backup_db); - - let existed = match fs::rename(&self.client_db, &backup_db) { - Ok(_) => true, - Err(e) => if let ErrorKind::NotFound = e.kind() { - false - } else { - return Err(e.into()); - } - }; - - match fs::rename(&our_db, &self.client_db) { - Ok(_) => { - // clean up the backup. - if existed { - try!(fs::remove_dir_all(&backup_db)); - } - Ok(()) - } - Err(e) => { - // restore the backup. - if existed { - try!(fs::rename(&backup_db, &self.client_db)); - } - Err(e.into()) - } - } + try!(self.db_restore.restore_db(our_db.to_str().unwrap())); + Ok(()) } /// Initialize the restoration synchronously. @@ -360,7 +292,10 @@ impl Service { *res = Some(try!(Restoration::new(params))); - *self.status.lock() = RestorationStatus::Ongoing; + *self.status.lock() = RestorationStatus::Ongoing { + state_chunks_done: self.state_chunks.load(Ordering::Relaxed) as u32, + block_chunks_done: self.block_chunks.load(Ordering::Relaxed) as u32, + }; Ok(()) } @@ -418,7 +353,7 @@ impl Service { match self.status() { RestorationStatus::Inactive | RestorationStatus::Failed => Ok(()), - RestorationStatus::Ongoing => { + RestorationStatus::Ongoing { .. 
} => { let res = { let rest = match *restoration { Some(ref mut r) => r, @@ -489,10 +424,6 @@ impl SnapshotService for Service { *self.status.lock() } - fn chunks_done(&self) -> (usize, usize) { - (self.state_chunks.load(Ordering::Relaxed), self.block_chunks.load(Ordering::Relaxed)) - } - fn begin_restore(&self, manifest: ManifestData) { self.io_channel.send(ClientIoMessage::BeginRestoration(manifest)) .expect("snapshot service and io service are kept alive by client service; qed"); @@ -522,15 +453,23 @@ impl SnapshotService for Service { #[cfg(test)] mod tests { + use std::sync::Arc; use service::ClientIoMessage; use io::{IoService}; use devtools::RandomTempPath; use tests::helpers::get_test_spec; use util::journaldb::Algorithm; - - use snapshot::ManifestData; + use error::Error; + use snapshot::{ManifestData, RestorationStatus, SnapshotService}; use super::*; + struct NoopDBRestore; + impl DatabaseRestore for NoopDBRestore { + fn restore_db(&self, _new_db: &str) -> Result<(), Error> { + Ok(()) + } + } + #[test] fn sends_async_messages() { let service = IoService::::start().unwrap(); @@ -544,13 +483,13 @@ mod tests { &get_test_spec(), Algorithm::Archive, dir, - service.channel() + service.channel(), + Arc::new(NoopDBRestore), ).unwrap(); assert!(service.manifest().is_none()); assert!(service.chunk(Default::default()).is_none()); assert_eq!(service.status(), RestorationStatus::Inactive); - assert_eq!(service.chunks_done(), (0, 0)); let manifest = ManifestData { state_hashes: vec![], @@ -565,4 +504,10 @@ mod tests { service.restore_state_chunk(Default::default(), vec![]); service.restore_block_chunk(Default::default(), vec![]); } -} \ No newline at end of file +} + +impl Drop for Service { + fn drop(&mut self) { + self.abort_restore(); + } +} diff --git a/ethcore/src/snapshot/snapshot_service_trait.rs b/ethcore/src/snapshot/snapshot_service_trait.rs new file mode 100644 index 000000000..7df90c943 --- /dev/null +++ b/ethcore/src/snapshot/snapshot_service_trait.rs @@ -0,0 +1,54 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use super::{ManifestData, RestorationStatus}; +use util::{Bytes, H256}; +use ipc::IpcConfig; + +/// The interface for a snapshot network service. +/// This handles: +/// - restoration of snapshots to temporary databases. +/// - responding to queries for snapshot manifests and chunks +#[derive(Ipc)] +#[ipc(client_ident="RemoteSnapshotService")] +pub trait SnapshotService : Sync + Send { + /// Query the most recent manifest data. + fn manifest(&self) -> Option; + + /// Get raw chunk for a given hash. + fn chunk(&self, hash: H256) -> Option; + + /// Ask the snapshot service for the restoration status. + fn status(&self) -> RestorationStatus; + + /// Begin snapshot restoration. + /// If restoration in-progress, this will reset it. + /// From this point on, any previous snapshot may become unavailable. 
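+ // Taken together, the methods of this trait are driven roughly as follows from a
+ // dependent crate such as sync (illustrative sketch only, not part of this patch;
+ // `service`, `manifest` and the chunk lists are assumed to have been obtained from
+ // the network layer, as the sync changes later in this series do):
+ //
+ // use util::{Bytes, H256};
+ // use ethcore::snapshot::{ManifestData, RestorationStatus, SnapshotService};
+ //
+ // fn drive_restore(
+ //     service: &SnapshotService,
+ //     manifest: ManifestData,
+ //     state_chunks: Vec<(H256, Bytes)>,
+ //     block_chunks: Vec<(H256, Bytes)>,
+ // ) {
+ //     // resets any in-progress restoration; the previous snapshot may become unavailable
+ //     service.begin_restore(manifest);
+ //     for (hash, chunk) in state_chunks {
+ //         service.restore_state_chunk(hash, chunk);
+ //     }
+ //     for (hash, chunk) in block_chunks {
+ //         service.restore_block_chunk(hash, chunk);
+ //     }
+ //     // progress now travels inside the status enum rather than a chunks_done() call
+ //     match service.status() {
+ //         RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } =>
+ //             println!("restored {} state / {} block chunks so far", state_chunks_done, block_chunks_done),
+ //         RestorationStatus::Inactive => println!("restoration finished"),
+ //         RestorationStatus::Failed => println!("restoration failed"),
+ //     }
+ // }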
+ fn begin_restore(&self, manifest: ManifestData); + + /// Abort an in-progress restoration if there is one. + fn abort_restore(&self); + + /// Feed a raw state chunk to the service to be processed asynchronously. + /// no-op if not currently restoring. + fn restore_state_chunk(&self, hash: H256, chunk: Bytes); + + /// Feed a raw block chunk to the service to be processed asynchronously. + /// no-op if currently restoring. + fn restore_block_chunk(&self, hash: H256, chunk: Bytes); +} + +impl IpcConfig for SnapshotService { } diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index e7731d1cc..0537fe056 100644 --- a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -31,3 +31,5 @@ pub mod trace_filter; pub mod call_analytics; pub mod transaction_import; pub mod block_import_error; +pub mod restoration_status; +pub mod snapshot_manifest; diff --git a/ethcore/src/types/restoration_status.rs b/ethcore/src/types/restoration_status.rs new file mode 100644 index 000000000..2840d9416 --- /dev/null +++ b/ethcore/src/types/restoration_status.rs @@ -0,0 +1,34 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Restoration status type definition + +/// Statuses for restorations. +#[derive(PartialEq, Eq, Clone, Copy, Debug, Binary)] +pub enum RestorationStatus { + /// No restoration. + Inactive, + /// Ongoing restoration. + Ongoing { + /// Number of state chunks completed. + state_chunks_done: u32, + /// Number of block chunks completed. + block_chunks_done: u32, + }, + /// Failed restoration. + Failed, +} + diff --git a/ethcore/src/types/snapshot_manifest.rs b/ethcore/src/types/snapshot_manifest.rs new file mode 100644 index 000000000..859ec016f --- /dev/null +++ b/ethcore/src/types/snapshot_manifest.rs @@ -0,0 +1,70 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Snapshot manifest type definition + +use util::hash::H256; +use rlp::*; +use util::Bytes; + +/// Manifest data. +#[derive(Debug, Clone, PartialEq, Eq, Binary)] +pub struct ManifestData { + /// List of state chunk hashes. + pub state_hashes: Vec, + /// List of block chunk hashes. + pub block_hashes: Vec, + /// The final, expected state root. + pub state_root: H256, + /// Block number this snapshot was taken at. 
+ pub block_number: u64, + /// Block hash this snapshot was taken at. + pub block_hash: H256, +} + +impl ManifestData { + /// Encode the manifest data to rlp. + pub fn into_rlp(self) -> Bytes { + let mut stream = RlpStream::new_list(5); + stream.append(&self.state_hashes); + stream.append(&self.block_hashes); + stream.append(&self.state_root); + stream.append(&self.block_number); + stream.append(&self.block_hash); + + stream.out() + } + + /// Try to restore manifest data from raw bytes, interpreted as RLP. + pub fn from_rlp(raw: &[u8]) -> Result { + let decoder = UntrustedRlp::new(raw); + + let state_hashes: Vec = try!(decoder.val_at(0)); + let block_hashes: Vec = try!(decoder.val_at(1)); + let state_root: H256 = try!(decoder.val_at(2)); + let block_number: u64 = try!(decoder.val_at(3)); + let block_hash: H256 = try!(decoder.val_at(4)); + + Ok(ManifestData { + state_hashes: state_hashes, + block_hashes: block_hashes, + state_root: state_root, + block_number: block_number, + block_hash: block_hash, + }) + } +} + diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 53c38a6b0..ed9c8ebc7 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -25,7 +25,7 @@ pub use self::canon_verifier::CanonVerifier; pub use self::noop_verifier::NoopVerifier; /// Verifier type. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum VerifierType { /// Verifies block normally. Canon, diff --git a/parity/main.rs b/parity/main.rs index 86844baa9..9c2ae7942 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -99,9 +99,11 @@ mod modules; mod account; mod blockchain; mod presale; -mod run; -mod sync; mod snapshot; +mod run; +#[cfg(feature="ipc")] +mod sync; +#[cfg(feature="ipc")] mod boot; #[cfg(feature="stratum")] @@ -158,10 +160,24 @@ mod stratum_optional { } } -fn main() { +#[cfg(not(feature="ipc"))] +fn sync_main() -> bool { + false +} + +#[cfg(feature="ipc")] +fn sync_main() -> bool { // just redirect to the sync::main() if std::env::args().nth(1).map_or(false, |arg| arg == "sync") { sync::main(); + true + } else { + false + } +} + +fn main() { + if sync_main() { return; } diff --git a/parity/modules.rs b/parity/modules.rs index 5edbca702..73de6ca29 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -18,6 +18,7 @@ use std::sync::Arc; use ethcore::client::BlockChainClient; use hypervisor::Hypervisor; use ethsync::{SyncConfig, NetworkConfiguration, NetworkError}; +use ethcore::snapshot::SnapshotService; #[cfg(not(feature="ipc"))] use self::no_ipc_deps::*; #[cfg(feature="ipc")] @@ -25,10 +26,12 @@ use self::ipc_deps::*; use ethcore_logger::Config as LogConfig; use std::path::Path; +#[cfg(feature="ipc")] pub mod service_urls { use std::path::PathBuf; pub const CLIENT: &'static str = "parity-chain.ipc"; + pub const SNAPSHOT: &'static str = "parity-snapshot.ipc"; pub const SYNC: &'static str = "parity-sync.ipc"; pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc"; pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc"; @@ -119,6 +122,7 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, _client: Arc, + _snapshot_service: Arc, log_settings: &LogConfig, ) -> Result @@ -148,10 +152,11 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, client: Arc, + snapshot_service: Arc, _log_settings: &LogConfig, ) -> Result { - let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg)); + let eth_sync = try!(EthSync::new(sync_cfg, client, snapshot_service, net_cfg)); Ok((eth_sync.clone() as 
Arc, eth_sync.clone() as Arc, eth_sync.clone() as Arc)) } diff --git a/parity/run.rs b/parity/run.rs index 8a68fe1af..174fe315f 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -179,13 +179,14 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // take handle to client let client = service.client(); + let snapshot_service = service.snapshot_service(); // create external miner let external_miner = Arc::new(ExternalMiner::default()); // create sync object let (sync_provider, manage_network, chain_notify) = try!(modules::sync( - &mut hypervisor, sync_config, net_conf.into(), client.clone(), &cmd.logger_config, + &mut hypervisor, sync_config, net_conf.into(), client.clone(), snapshot_service, &cmd.logger_config, ).map_err(|e| format!("Sync error: {}", e))); service.add_notify(chain_notify.clone()); diff --git a/parity/snapshot.rs b/parity/snapshot.rs index ecc463a2e..5bf5024ae 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -129,10 +129,9 @@ impl SnapshotCommand { let informant_handle = snapshot.clone(); ::std::thread::spawn(move || { - while let RestorationStatus::Ongoing = informant_handle.status() { - let (state_chunks, block_chunks) = informant_handle.chunks_done(); + while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() { info!("Processed {}/{} state chunks and {}/{} block chunks.", - state_chunks, num_state, block_chunks, num_blocks); + state_chunks_done, num_state, block_chunks_done, num_blocks); ::std::thread::sleep(Duration::from_secs(5)); } @@ -161,7 +160,7 @@ impl SnapshotCommand { } match snapshot.status() { - RestorationStatus::Ongoing => Err("Snapshot file is incomplete and missing chunks.".into()), + RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()), RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), RestorationStatus::Inactive => { info!("Restoration complete."); diff --git a/parity/sync.rs b/parity/sync.rs index 27e9d5a6a..85f771546 100644 --- a/parity/sync.rs +++ b/parity/sync.rs @@ -20,6 +20,7 @@ use std::sync::Arc; use std::sync::atomic::AtomicBool; use hypervisor::{SYNC_MODULE_ID, HYPERVISOR_IPC_URL, ControlService}; use ethcore::client::{RemoteClient, ChainNotify}; +use ethcore::snapshot::{RemoteSnapshotService}; use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration}; use modules::service_urls; use boot; @@ -45,8 +46,9 @@ pub fn main() { .unwrap_or_else(|e| panic!("Fatal: error reading boot arguments ({:?})", e)); let remote_client = dependency!(RemoteClient, &service_urls::with_base(&service_config.io_path, service_urls::CLIENT)); + let remote_snapshot = dependency!(RemoteSnapshotService, &service_urls::with_base(&service_config.io_path, service_urls::SNAPSHOT)); - let sync = EthSync::new(service_config.sync, remote_client.service().clone(), service_config.net).unwrap(); + let sync = EthSync::new(service_config.sync, remote_client.service().clone(), remote_snapshot.service().clone(), service_config.net).unwrap(); let _ = boot::main_thread(); let service_stop = Arc::new(AtomicBool::new(false)); diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7807c01eb..9487f020d 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -254,7 +254,8 @@ impl Eth for EthClient where let status = take_weak!(self.sync).status(); let res = match status.state { SyncState::Idle => SyncStatus::None, - SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead => { + 
SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead + | SyncState::SnapshotManifest | SyncState::SnapshotData | SyncState::SnapshotWaiting => { let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number); let highest_block = U256::from(status.highest_block_number.unwrap_or(status.start_block_number)); diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index 94f7b4893..b83aff758 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -49,6 +49,8 @@ impl TestSyncProvider { num_peers: config.num_peers, num_active_peers: 0, mem_used: 0, + num_snapshot_chunks: 0, + snapshot_chunks_done: 0, }), } } diff --git a/sync/src/api.rs b/sync/src/api.rs index a19004642..de1769d9c 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -20,6 +20,7 @@ use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, use util::{U256, H256}; use io::{TimerToken}; use ethcore::client::{BlockChainClient, ChainNotify}; +use ethcore::snapshot::SnapshotService; use ethcore::header::BlockNumber; use sync_io::NetSyncIo; use chain::{ChainSync, SyncStatus}; @@ -71,12 +72,12 @@ pub struct EthSync { impl EthSync { /// Creates and register protocol with the network service - pub fn new(config: SyncConfig, chain: Arc, network_config: NetworkConfiguration) -> Result, NetworkError> { + pub fn new(config: SyncConfig, chain: Arc, snapshot_service: Arc, network_config: NetworkConfiguration) -> Result, NetworkError> { let chain_sync = ChainSync::new(config, &*chain); let service = try!(NetworkService::new(try!(network_config.into_basic()))); let sync = Arc::new(EthSync{ network: service, - handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain }), + handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain, snapshot_service: snapshot_service }), }); Ok(sync) @@ -93,8 +94,10 @@ impl SyncProvider for EthSync { } struct SyncProtocolHandler { - /// Shared blockchain client. TODO: this should evetually become an IPC endpoint + /// Shared blockchain client. chain: Arc, + /// Shared snapshot service. 
+ snapshot_service: Arc, /// Sync strategy sync: RwLock, } @@ -105,21 +108,21 @@ impl NetworkProtocolHandler for SyncProtocolHandler { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain), *peer, packet_id, data); + ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer, packet_id, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain), *peer); + self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer); } fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain), *peer); + self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service), *peer); } fn timeout(&self, io: &NetworkContext, _timer: TimerToken) { - self.sync.write().maintain_peers(&mut NetSyncIo::new(io, &*self.chain)); - self.sync.write().maintain_sync(&mut NetSyncIo::new(io, &*self.chain)); - self.sync.write().propagate_new_transactions(&mut NetSyncIo::new(io, &*self.chain)); + self.sync.write().maintain_peers(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service)); + self.sync.write().maintain_sync(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service)); + self.sync.write().propagate_new_transactions(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service)); } } @@ -133,7 +136,7 @@ impl ChainNotify for EthSync { _duration: u64) { self.network.with_context(ETH_PROTOCOL, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.handler.chain); + let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &*self.handler.snapshot_service); self.handler.sync.write().chain_new_blocks( &mut sync_io, &imported, @@ -146,7 +149,7 @@ impl ChainNotify for EthSync { fn start(&self) { self.network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e)); - self.network.register_protocol(self.handler.clone(), ETH_PROTOCOL, &[62u8, 63u8]) + self.network.register_protocol(self.handler.clone(), ETH_PROTOCOL, &[62u8, 63u8, 64u8]) .unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e)); } @@ -202,7 +205,7 @@ impl ManageNetwork for EthSync { fn stop_network(&self) { self.network.with_context(ETH_PROTOCOL, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.handler.chain); + let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &*self.handler.snapshot_service); self.handler.sync.write().abort(&mut sync_io); }); self.stop(); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index e5e5de5dc..ea5e593f3 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -96,17 +96,19 @@ use ethcore::header::{BlockNumber, Header as BlockHeader}; use ethcore::client::{BlockChainClient, BlockStatus, BlockID, BlockChainInfo, BlockImportError}; use ethcore::error::*; use ethcore::block::Block; +use ethcore::snapshot::{ManifestData, RestorationStatus}; use sync_io::SyncIo; use time; use super::SyncConfig; use blocks::BlockCollection; +use snapshot::{Snapshot, ChunkType}; use rand::{thread_rng, Rng}; known_heap_size!(0, PeerInfo); type PacketDecodeError = DecoderError; -const PROTOCOL_VERSION: u8 = 63u8; +const PROTOCOL_VERSION: u8 = 64u8; const MAX_BODIES_TO_SEND: usize = 256; const MAX_HEADERS_TO_SEND: usize = 512; const MAX_NODE_DATA_TO_SEND: usize = 1024; @@ -136,14 +138,26 @@ 
const GET_NODE_DATA_PACKET: u8 = 0x0d; const NODE_DATA_PACKET: u8 = 0x0e; const GET_RECEIPTS_PACKET: u8 = 0x0f; const RECEIPTS_PACKET: u8 = 0x10; +const GET_SNAPSHOT_MANIFEST_PACKET: u8 = 0x11; +const SNAPSHOT_MANIFEST_PACKET: u8 = 0x12; +const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13; +const SNAPSHOT_DATA_PACKET: u8 = 0x14; const HEADERS_TIMEOUT_SEC: f64 = 15f64; const BODIES_TIMEOUT_SEC: f64 = 5f64; const FORK_HEADER_TIMEOUT_SEC: f64 = 3f64; +const SNAPSHOT_MANIFEST_TIMEOUT_SEC: f64 = 3f64; +const SNAPSHOT_DATA_TIMEOUT_SEC: f64 = 10f64; #[derive(Copy, Clone, Eq, PartialEq, Debug)] /// Sync state pub enum SyncState { + /// Waiting for pv64 peers to start snapshot syncing + SnapshotManifest, + /// Downloading snapshot data + SnapshotData, + /// Waiting for snapshot restoration to complete + SnapshotWaiting, /// Downloading subchain heads ChainHead, /// Initial chain sync complete. Waiting for new packets @@ -177,10 +191,14 @@ pub struct SyncStatus { pub blocks_received: BlockNumber, /// Total number of connected peers pub num_peers: usize, - /// Total number of active peers + /// Total number of active peers. pub num_active_peers: usize, - /// Heap memory used in bytes + /// Heap memory used in bytes. pub mem_used: usize, + /// Snapshot chunks + pub num_snapshot_chunks: usize, + /// Snapshot chunks downloaded + pub snapshot_chunks_done: usize, } impl SyncStatus { @@ -207,6 +225,8 @@ enum PeerAsking { BlockHeaders, BlockBodies, Heads, + SnapshotManifest, + SnapshotData, } #[derive(Clone, Eq, PartialEq)] @@ -240,6 +260,8 @@ struct PeerInfo { asking_blocks: Vec, /// Holds requested header hash if currently requesting block header by hash asking_hash: Option, + /// Holds requested snapshot chunk hash if any. + asking_snapshot_data: Option, /// Request timestamp ask_time: f64, /// Holds a set of transactions recently sent to this peer to avoid spamming. @@ -248,6 +270,10 @@ struct PeerInfo { expired: bool, /// Peer fork confirmation status confirmation: ForkConfirmation, + /// Best snapshot hash + snapshot_hash: Option, + /// Best snapshot block number + snapshot_number: Option, } impl PeerInfo { @@ -293,6 +319,8 @@ pub struct ChainSync { network_id: U256, /// Optional fork block to check fork_block: Option<(BlockNumber, H256)>, + /// Snapshot downloader. + snapshot: Snapshot, } type RlpResponseResult = Result, PacketDecodeError>; @@ -301,8 +329,8 @@ impl ChainSync { /// Create a new instance of syncing strategy. 
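// The new sync states introduced above are expected to chain roughly as follows once a
// pv64 peer advertises a snapshot (summary sketch of the flow implemented in the hunks
// below, not code from the patch):
//
//   SyncState::Idle
//     -> SyncState::SnapshotManifest   // GetSnapshotManifest sent to one peer
//     -> SyncState::SnapshotData       // chunks requested from peers sharing that manifest
//     -> SyncState::SnapshotWaiting    // all chunks fed; waiting for RestorationStatus::Inactive
//     -> SyncState::Idle               // restoration done; normal block sync resumes
//
// Peers whose advertised snapshot_hash does not match the chosen manifest are simply not
// asked for chunks while the download is in progress.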
pub fn new(config: SyncConfig, chain: &BlockChainClient) -> ChainSync { let chain = chain.chain_info(); - let mut sync = ChainSync { - state: SyncState::ChainHead, + ChainSync { + state: SyncState::Idle, starting_block: chain.best_block_number, highest_block: None, last_imported_block: chain.best_block_number, @@ -317,16 +345,15 @@ impl ChainSync { _max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, fork_block: config.fork_block, - }; - sync.reset(); - sync + snapshot: Snapshot::new(), + } } /// @returns Synchonization status pub fn status(&self) -> SyncStatus { SyncStatus { state: self.state.clone(), - protocol_version: 63, + protocol_version: if self.state == SyncState::SnapshotData { 64 } else { 63 }, network_id: self.network_id, start_block_number: self.starting_block, last_imported_block_number: Some(self.last_imported_block), @@ -335,6 +362,8 @@ impl ChainSync { blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 }, num_peers: self.peers.values().filter(|p| p.is_allowed()).count(), num_active_peers: self.peers.values().filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing).count(), + num_snapshot_chunks: self.snapshot.total_chunks(), + snapshot_chunks_done: self.snapshot.done_chunks(), mem_used: self.blocks.heap_size() + self.peers.heap_size_of_children() @@ -350,8 +379,13 @@ impl ChainSync { #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` /// Reset sync. Clear all downloaded data but keep the queue - fn reset(&mut self) { + fn reset(&mut self, io: &mut SyncIo) { self.blocks.clear(); + self.snapshot.clear(); + if self.state == SyncState::SnapshotData { + debug!(target:"sync", "Aborting snapshot restore"); + io.snapshot_service().abort_restore(); + } for (_, ref mut p) in &mut self.peers { p.asking_blocks.clear(); p.asking_hash = None; @@ -368,7 +402,7 @@ impl ChainSync { /// Restart sync pub fn restart(&mut self, io: &mut SyncIo) { trace!(target: "sync", "Restarting"); - self.reset(); + self.reset(io); self.start_sync_round(io); self.continue_sync(io); } @@ -380,13 +414,19 @@ impl ChainSync { if self.active_peers.is_empty() { trace!(target: "sync", "No more active peers"); if self.state == SyncState::ChainHead { - self.complete_sync(); + self.complete_sync(io); } else { self.restart(io); } } } + fn start_snapshot_sync(&mut self, io: &mut SyncIo, peer_id: PeerId) { + self.snapshot.clear(); + self.request_snapshot_manifest(io, peer_id); + self.state = SyncState::SnapshotManifest; + } + /// Restart sync after bad block has been detected. 
May end up re-downloading up to QUEUE_SIZE blocks fn restart_on_bad_block(&mut self, io: &mut SyncIo) { // Do not assume that the block queue/chain still has our last_imported_block @@ -398,8 +438,9 @@ impl ChainSync { /// Called by peer to report status fn on_peer_status(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + let protocol_version: u32 = try!(r.val_at(0)); let peer = PeerInfo { - protocol_version: try!(r.val_at(0)), + protocol_version: protocol_version, network_id: try!(r.val_at(1)), difficulty: Some(try!(r.val_at(2))), latest_hash: try!(r.val_at(3)), @@ -412,6 +453,9 @@ impl ChainSync { last_sent_transactions: HashSet::new(), expired: false, confirmation: if self.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed }, + asking_snapshot_data: None, + snapshot_hash: if protocol_version == 64 { Some(try!(r.val_at(5))) } else { None }, + snapshot_number: if protocol_version == 64 { Some(try!(r.val_at(6))) } else { None }, }; trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{})", peer_id, peer.protocol_version, peer.network_id, peer.difficulty, peer.latest_hash, peer.genesis); @@ -749,6 +793,96 @@ impl ChainSync { Ok(()) } + /// Called when snapshot manifest is downloaded from a peer. + fn on_snapshot_manifest(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id); + return Ok(()); + } + self.clear_peer_download(peer_id); + if !self.reset_peer_asking(peer_id, PeerAsking::SnapshotManifest) || self.state != SyncState::SnapshotManifest { + trace!(target: "sync", "{}: Ignored unexpected manifest", peer_id); + self.continue_sync(io); + return Ok(()); + } + + let manifest_rlp = try!(r.at(0)); + let manifest = match ManifestData::from_rlp(&manifest_rlp.as_raw()) { + Err(e) => { + trace!(target: "sync", "{}: Ignored bad manifest: {:?}", peer_id, e); + io.disconnect_peer(peer_id); + self.continue_sync(io); + return Ok(()); + } + Ok(manifest) => manifest, + }; + self.snapshot.reset_to(&manifest, &manifest_rlp.as_raw().sha3()); + io.snapshot_service().begin_restore(manifest); + self.state = SyncState::SnapshotData; + + // give a task to the same peer first. + self.sync_peer(io, peer_id, false); + // give tasks to other peers + self.continue_sync(io); + Ok(()) + } + + /// Called when snapshot data is downloaded from a peer. + fn on_snapshot_data(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id); + return Ok(()); + } + self.clear_peer_download(peer_id); + if !self.reset_peer_asking(peer_id, PeerAsking::SnapshotData) || self.state != SyncState::SnapshotData { + trace!(target: "sync", "{}: Ignored unexpected snapshot data", peer_id); + self.continue_sync(io); + return Ok(()); + } + + // check service status + match io.snapshot_service().status() { + RestorationStatus::Inactive | RestorationStatus::Failed => { + trace!(target: "sync", "{}: Snapshot restoration aborted", peer_id); + self.state = SyncState::Idle; + self.snapshot.clear(); + self.continue_sync(io); + return Ok(()); + }, + RestorationStatus::Ongoing { .. 
} => { + trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); + }, + } + + let snapshot_data: Bytes = try!(r.val_at(0)); + match self.snapshot.validate_chunk(&snapshot_data) { + Ok(ChunkType::Block(hash)) => { + trace!(target: "sync", "{}: Processing block chunk", peer_id); + io.snapshot_service().restore_block_chunk(hash, snapshot_data); + } + Ok(ChunkType::State(hash)) => { + trace!(target: "sync", "{}: Processing state chunk", peer_id); + io.snapshot_service().restore_state_chunk(hash, snapshot_data); + } + Err(()) => { + trace!(target: "sync", "{}: Got bad snapshot chunk", peer_id); + io.disconnect_peer(peer_id); + self.continue_sync(io); + return Ok(()); + } + } + + if self.snapshot.is_complete() { + // wait for snapshot restoration process to complete + self.state = SyncState::SnapshotWaiting; + } + // give a task to the same peer first. + self.sync_peer(io, peer_id, false); + // give tasks to other peers + self.continue_sync(io); + Ok(()) + } + /// Called by peer when it is disconnecting pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Disconnecting {}: {}", peer, io.peer_info(peer)); @@ -764,7 +898,7 @@ impl ChainSync { /// Called when a new peer is connected pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}: {}", peer, io.peer_info(peer)); - if let Err(e) = self.send_status(io) { + if let Err(e) = self.send_status(io, peer) { debug!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer); } @@ -772,24 +906,27 @@ impl ChainSync { /// Resume downloading fn continue_sync(&mut self, io: &mut SyncIo) { - let mut peers: Vec<(PeerId, U256)> = self.peers.iter().filter_map(|(k, p)| - if p.can_sync() { Some((*k, p.difficulty.unwrap_or_else(U256::zero))) } else { None }).collect(); + let mut peers: Vec<(PeerId, U256, u32)> = self.peers.iter().filter_map(|(k, p)| + if p.can_sync() { Some((*k, p.difficulty.unwrap_or_else(U256::zero), p.protocol_version)) } else { None }).collect(); thread_rng().shuffle(&mut peers); //TODO: sort by rating + // prefer peers with higher protocol version + peers.sort_by(|&(_, _, ref v1), &(_, _, ref v2)| v1.cmp(v2)); trace!(target: "sync", "Syncing with {}/{} peers", self.active_peers.len(), peers.len()); - for (p, _) in peers { + for (p, _, _) in peers { if self.active_peers.contains(&p) { self.sync_peer(io, p, false); } } - if self.state != SyncState::Waiting && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.can_sync()) { - self.complete_sync(); + if self.state != SyncState::Waiting && self.state != SyncState::SnapshotWaiting + && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.can_sync()) { + self.complete_sync(io); } } /// Called after all blocks have been downloaded - fn complete_sync(&mut self) { + fn complete_sync(&mut self, io: &mut SyncIo) { trace!(target: "sync", "Sync complete"); - self.reset(); + self.reset(io); self.state = SyncState::Idle; } @@ -805,7 +942,7 @@ impl ChainSync { trace!(target: "sync", "Skipping deactivated peer"); return; } - let (peer_latest, peer_difficulty) = { + let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { let peer = self.peers.get_mut(&peer_id).unwrap(); if peer.asking != PeerAsking::Nothing || !peer.can_sync() { return; @@ -814,7 +951,11 @@ impl ChainSync { trace!(target: "sync", "Waiting for the block queue"); return; } - (peer.latest_hash.clone(), peer.difficulty.clone()) + if self.state == 
SyncState::SnapshotWaiting { + trace!(target: "sync", "Waiting for the snapshot restoration"); + return; + } + (peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned(), peer.snapshot_hash.as_ref().cloned()) }; let chain_info = io.chain().chain_info(); let td = chain_info.pending_total_difficulty; @@ -823,13 +964,18 @@ impl ChainSync { if force || self.state == SyncState::NewBlocks || peer_difficulty.map_or(true, |pd| pd > syncing_difficulty) { match self.state { SyncState::Idle => { - if self.last_imported_block < chain_info.best_block_number { - self.last_imported_block = chain_info.best_block_number; - self.last_imported_hash = chain_info.best_block_hash; + // check if we can start snapshot sync with this peer + if peer_snapshot_number.unwrap_or(0) > 0 && chain_info.best_block_number == 0 { + self.start_snapshot_sync(io, peer_id); + } else { + if self.last_imported_block < chain_info.best_block_number { + self.last_imported_block = chain_info.best_block_number; + self.last_imported_hash = chain_info.best_block_hash; + } + trace!(target: "sync", "Starting sync with {}", peer_id); + self.start_sync_round(io); + self.sync_peer(io, peer_id, force); } - trace!(target: "sync", "Starting sync with {}", peer_id); - self.start_sync_round(io); - self.sync_peer(io, peer_id, force); }, SyncState::ChainHead => { // Request subchain headers @@ -843,8 +989,14 @@ impl ChainSync { if io.chain().block_status(BlockID::Hash(peer_latest)) == BlockStatus::Unknown { self.request_blocks(io, peer_id, false); } - } - SyncState::Waiting => () + }, + SyncState::SnapshotData => { + if peer_snapshot_hash.is_some() && peer_snapshot_hash == self.snapshot.snapshot_hash() { + self.request_snapshot_data(io, peer_id); + } + }, + SyncState::SnapshotManifest => (), //already downloading from other peer + SyncState::Waiting | SyncState::SnapshotWaiting => () } } } @@ -903,6 +1055,16 @@ impl ChainSync { } } + /// Find some headers or blocks to download for a peer. + fn request_snapshot_data(&mut self, io: &mut SyncIo, peer_id: PeerId) { + self.clear_peer_download(peer_id); + // find chunk data to download + if let Some(hash) = self.snapshot.needed_chunk() { + self.peers.get_mut(&peer_id).unwrap().asking_snapshot_data = Some(hash.clone()); + self.request_snapshot_chunk(io, peer_id, &hash); + } + } + /// Clear all blocks/headers marked as being downloaded by a peer. fn clear_peer_download(&mut self, peer_id: PeerId) { let peer = self.peers.get_mut(&peer_id).unwrap(); @@ -917,9 +1079,15 @@ impl ChainSync { self.blocks.clear_body_download(b); } }, + PeerAsking::SnapshotData => { + if let Some(hash) = peer.asking_snapshot_data { + self.snapshot.clear_chunk_download(&hash); + } + }, _ => (), } peer.asking_blocks.clear(); + peer.asking_snapshot_data = None; } fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) { @@ -1016,6 +1184,22 @@ impl ChainSync { rlp.append(&if reverse {1u32} else {0u32}); self.send_request(sync, peer_id, asking, GET_BLOCK_HEADERS_PACKET, rlp.out()); } + + /// Request snapshot manifest from a peer. + fn request_snapshot_manifest(&mut self, sync: &mut SyncIo, peer_id: PeerId) { + trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id); + let rlp = RlpStream::new_list(0); + self.send_request(sync, peer_id, PeerAsking::SnapshotManifest, GET_SNAPSHOT_MANIFEST_PACKET, rlp.out()); + } + + /// Request snapshot chunk from a peer. 
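+ // For reference, the pv64 packets used by the request helpers here and the response
+ // handlers further below carry the following RLP payloads (summarized from this patch;
+ // the packet ids are the constants defined earlier in this file):
+ //
+ //   GET_SNAPSHOT_MANIFEST_PACKET (0x11): []               (empty list)
+ //   SNAPSHOT_MANIFEST_PACKET     (0x12): [ manifest ]     or [] if no snapshot is available
+ //   GET_SNAPSHOT_DATA_PACKET     (0x13): [ chunk_hash ]
+ //   SNAPSHOT_DATA_PACKET         (0x14): [ chunk_bytes ]  or [] if the chunk is unknown
+ //
+ // The pv64 Status handshake (see send_status below) grows from 5 to 7 items:
+ //   [ protocol_version, network_id, total_difficulty, best_hash, genesis_hash,
+ //     manifest_hash, manifest_block_number ]
+ // where manifest_hash is sha3(manifest.into_rlp()) of the local snapshot, or the zero
+ // H256 when no snapshot is available.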
+ fn request_snapshot_chunk(&mut self, sync: &mut SyncIo, peer_id: PeerId, chunk: &H256) { + trace!(target: "sync", "{} <- GetSnapshotData {:?}", peer_id, chunk); + let mut rlp = RlpStream::new_list(1); + rlp.append(chunk); + self.send_request(sync, peer_id, PeerAsking::SnapshotData, GET_SNAPSHOT_DATA_PACKET, rlp.out()); + } + /// Request block bodies from a peer fn request_bodies(&mut self, sync: &mut SyncIo, peer_id: PeerId, hashes: Vec) { let mut rlp = RlpStream::new_list(hashes.len()); @@ -1086,14 +1270,22 @@ impl ChainSync { } /// Send Status message - fn send_status(&mut self, io: &mut SyncIo) -> Result<(), NetworkError> { - let mut packet = RlpStream::new_list(5); + fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), NetworkError> { + let pv64 = io.eth_protocol_version(peer) >= 64; + let mut packet = RlpStream::new_list(if pv64 { 7 } else { 5 }); let chain = io.chain().chain_info(); packet.append(&(PROTOCOL_VERSION as u32)); packet.append(&self.network_id); packet.append(&chain.total_difficulty); packet.append(&chain.best_block_hash); packet.append(&chain.genesis_hash); + if pv64 { + let manifest = io.snapshot_service().manifest(); + let block_number = manifest.as_ref().map_or(0, |m| m.block_number); + let manifest_hash = manifest.map_or(H256::new(), |m| m.into_rlp().sha3()); + packet.append(&manifest_hash); + packet.append(&block_number); + } io.respond(STATUS_PACKET, packet.out()) } @@ -1230,6 +1422,48 @@ impl ChainSync { Ok(Some((RECEIPTS_PACKET, rlp_result))) } + /// Respond to GetSnapshotManifest request + fn return_snapshot_manifest(io: &SyncIo, r: &UntrustedRlp, peer_id: PeerId) -> RlpResponseResult { + let count = r.item_count(); + trace!(target: "sync", "{} -> GetSnapshotManifest", peer_id); + if count != 0 { + debug!(target: "sync", "Invalid GetSnapshotManifest request, ignoring."); + return Ok(None); + } + let rlp = match io.snapshot_service().manifest() { + Some(manifest) => { + trace!(target: "sync", "{} <- SnapshotManifest", peer_id); + let mut rlp = RlpStream::new_list(1); + rlp.append_raw(&manifest.into_rlp(), 1); + rlp + }, + None => { + trace!(target: "sync", "{}: No manifest to return", peer_id); + let rlp = RlpStream::new_list(0); + rlp + } + }; + Ok(Some((SNAPSHOT_MANIFEST_PACKET, rlp))) + } + + /// Respond to GetSnapshotManifest request + fn return_snapshot_data(io: &SyncIo, r: &UntrustedRlp, peer_id: PeerId) -> RlpResponseResult { + let hash: H256 = try!(r.val_at(0)); + trace!(target: "sync", "{} -> GetSnapshotData {:?}", peer_id, hash); + let rlp = match io.snapshot_service().chunk(hash) { + Some(data) => { + let mut rlp = RlpStream::new_list(1); + rlp.append(&data); + rlp + }, + None => { + let rlp = RlpStream::new_list(0); + rlp + } + }; + Ok(Some((SNAPSHOT_DATA_PACKET, rlp))) + } + fn return_rlp(io: &mut SyncIo, rlp: &UntrustedRlp, peer: PeerId, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError> where FRlp : Fn(&SyncIo, &UntrustedRlp, PeerId) -> RlpResponseResult, FError : FnOnce(NetworkError) -> String @@ -1266,6 +1500,14 @@ impl ChainSync { ChainSync::return_node_data, |e| format!("Error sending nodes: {:?}", e)), + GET_SNAPSHOT_MANIFEST_PACKET => ChainSync::return_rlp(io, &rlp, peer, + ChainSync::return_snapshot_manifest, + |e| format!("Error sending snapshot manifest: {:?}", e)), + + GET_SNAPSHOT_DATA_PACKET => ChainSync::return_rlp(io, &rlp, peer, + ChainSync::return_snapshot_data, + |e| format!("Error sending snapshot data: {:?}", e)), + _ => { sync.write().on_packet(io, peer, packet_id, data); Ok(()) @@ -1289,6 
+1531,8 @@ impl ChainSync { BLOCK_BODIES_PACKET => self.on_peer_block_bodies(io, peer, &rlp), NEW_BLOCK_PACKET => self.on_peer_new_block(io, peer, &rlp), NEW_BLOCK_HASHES_PACKET => self.on_peer_new_hashes(io, peer, &rlp), + SNAPSHOT_MANIFEST_PACKET => self.on_snapshot_manifest(io, peer, &rlp), + SNAPSHOT_DATA_PACKET => self.on_snapshot_data(io, peer, &rlp), _ => { debug!(target: "sync", "Unknown packet {}", packet_id); Ok(()) @@ -1308,6 +1552,8 @@ impl ChainSync { PeerAsking::BlockBodies => (tick - peer.ask_time) > BODIES_TIMEOUT_SEC, PeerAsking::Nothing => false, PeerAsking::ForkHeader => (tick - peer.ask_time) > FORK_HEADER_TIMEOUT_SEC, + PeerAsking::SnapshotManifest => (tick - peer.ask_time) > SNAPSHOT_MANIFEST_TIMEOUT_SEC, + PeerAsking::SnapshotData => (tick - peer.ask_time) > SNAPSHOT_DATA_TIMEOUT_SEC, }; if timeout { trace!(target:"sync", "Timeout {}", peer_id); @@ -1321,9 +1567,12 @@ impl ChainSync { } fn check_resume(&mut self, io: &mut SyncIo) { - if !io.chain().queue_info().is_full() && self.state == SyncState::Waiting { + if self.state == SyncState::Waiting && !io.chain().queue_info().is_full() && self.state == SyncState::Waiting { self.state = SyncState::Blocks; self.continue_sync(io); + } else if self.state == SyncState::SnapshotWaiting && io.snapshot_service().status() == RestorationStatus::Inactive { + self.state = SyncState::Idle; + self.continue_sync(io); } } @@ -1559,6 +1808,7 @@ impl ChainSync { #[cfg(test)] mod tests { use tests::helpers::*; + use tests::snapshot::TestSnapshotService; use super::*; use ::SyncConfig; use util::*; @@ -1612,7 +1862,8 @@ mod tests { fn return_receipts_empty() { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]), 0); @@ -1624,7 +1875,8 @@ mod tests { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); let sync = dummy_sync_with_peer(H256::new(), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut receipt_list = RlpStream::new_list(4); receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -1679,7 +1931,8 @@ mod tests { let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); let mut queue = VecDeque::new(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let unknown: H256 = H256::new(); let result = ChainSync::return_block_headers(&io, &UntrustedRlp::new(&make_hash_req(&unknown, 1, 0, false)), 0); @@ -1717,7 +1970,8 @@ mod tests { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); let sync = dummy_sync_with_peer(H256::new(), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut node_list = RlpStream::new_list(3); node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -1758,6 +2012,9 @@ mod tests { last_sent_transactions: HashSet::new(), expired: false, confirmation: super::ForkConfirmation::Confirmed, + snapshot_number: None, + 
snapshot_hash: None, + asking_snapshot_data: None, }); sync } @@ -1769,7 +2026,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); let chain_info = client.chain_info(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let lagging_peers = sync.get_lagging_peers(&chain_info, &io); @@ -1800,7 +2058,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers); @@ -1820,7 +2079,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers); @@ -1840,7 +2100,8 @@ mod tests { let hash = client.block_hash(BlockID::Number(99)).unwrap(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers); @@ -1859,7 +2120,8 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); // Try to propagate same transactions for the second time let peer_count2 = sync.propagate_new_transactions(&mut io); @@ -1880,7 +2142,8 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]); // Try to propagate same transactions for the second time @@ -1903,17 +2166,17 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); + let mut ss = TestSnapshotService::new(); // should sent some { - - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); assert_eq!(1, io.queue.len()); assert_eq!(1, peer_count); } // Insert some more 
client.insert_transaction_to_queue(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); // Propagate new transactions let peer_count2 = sync.propagate_new_transactions(&mut io); // And now the peer should have all transactions @@ -1939,7 +2202,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); //sync.have_common_block = true; - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let block = UntrustedRlp::new(&block_data); @@ -1957,7 +2221,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let block = UntrustedRlp::new(&block_data); @@ -1972,7 +2237,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let empty_data = vec![]; let block = UntrustedRlp::new(&empty_data); @@ -1988,7 +2254,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let hashes_data = get_dummy_hashes(); let hashes_rlp = UntrustedRlp::new(&hashes_data); @@ -2004,7 +2271,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let empty_hashes_data = vec![]; let hashes_rlp = UntrustedRlp::new(&empty_hashes_data); @@ -2023,7 +2291,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); sync.propagate_new_hashes(&chain_info, &mut io, &peers); @@ -2042,7 +2311,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); sync.propagate_blocks(&chain_info, &mut io, &[], &peers); @@ -2076,7 +2346,8 @@ mod tests { // when { let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, 
&mut queue, None); io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks); sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0); @@ -2090,7 +2361,8 @@ mod tests { } { let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks); sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]); } @@ -2114,7 +2386,8 @@ mod tests { let retracted_blocks = vec![client.block_hash_delta_minus(1)]; let mut queue = VecDeque::new(); - let mut io = TestIo::new(&mut client, &mut queue, None); + let mut ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); // when sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index c0c240f9d..d2c6e2583 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -26,40 +26,6 @@ //! Implements ethereum protocol version 63 as specified here: //! https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol //! -//! Usage example: -//! -//! ```rust -//! extern crate ethcore_util as util; -//! extern crate ethcore_io as io; -//! extern crate ethcore; -//! extern crate ethsync; -//! use std::env; -//! use io::IoChannel; -//! use ethcore::client::{Client, ClientConfig}; -//! use ethsync::{EthSync, SyncConfig, ManageNetwork, NetworkConfiguration}; -//! use ethcore::ethereum; -//! use ethcore::miner::{GasPricer, Miner}; -//! -//! fn main() { -//! let dir = env::temp_dir(); -//! let spec = ethereum::new_frontier(); -//! let miner = Miner::new( -//! Default::default(), -//! GasPricer::new_fixed(20_000_000_000u64.into()), -//! &spec, -//! None -//! ); -//! let client = Client::new( -//! ClientConfig::default(), -//! &spec, -//! &dir, -//! miner, -//! IoChannel::disconnected() -//! ).unwrap(); -//! let sync = EthSync::new(SyncConfig::default(), client, NetworkConfiguration::from(NetworkConfiguration::new())).unwrap(); -//! sync.start_network(); -//! } -//! ``` extern crate ethcore_network as network; extern crate ethcore_io as io; @@ -83,6 +49,7 @@ extern crate ethcore_ipc as ipc; mod chain; mod blocks; mod sync_io; +mod snapshot; #[cfg(test)] mod tests; @@ -96,4 +63,3 @@ pub use api::{EthSync, SyncProvider, SyncClient, NetworkManagerClient, ManageNet ServiceConfiguration, NetworkConfiguration}; pub use chain::{SyncStatus, SyncState}; pub use network::{is_valid_node_url, NonReservedPeerMode, NetworkError}; - diff --git a/sync/src/snapshot.rs b/sync/src/snapshot.rs new file mode 100644 index 000000000..ca9adf220 --- /dev/null +++ b/sync/src/snapshot.rs @@ -0,0 +1,200 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
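+ // A rough usage sketch of the chunk tracker defined below in this file (illustrative
+ // only, not part of the patch): `service` is any ethcore SnapshotService implementor
+ // and `fetch_from_peer` is a hypothetical stand-in for the network requests that
+ // chain.rs actually issues.
+ //
+ // fn warp_sync_once<F>(manifest: &ManifestData, manifest_rlp: &[u8], service: &::ethcore::snapshot::SnapshotService, mut fetch_from_peer: F)
+ //     where F: FnMut(&H256) -> Vec<u8>
+ // {
+ //     let mut tracker = Snapshot::new();
+ //     tracker.reset_to(manifest, &manifest_rlp.sha3());      // remember which chunks are needed
+ //     while let Some(hash) = tracker.needed_chunk() {        // marks the chunk as downloading
+ //         let data = fetch_from_peer(&hash);                 // download the raw chunk bytes (hypothetical)
+ //         match tracker.validate_chunk(&data) {
+ //             Ok(ChunkType::State(h)) => service.restore_state_chunk(h, data),
+ //             Ok(ChunkType::Block(h)) => service.restore_block_chunk(h, data),
+ //             Err(()) => tracker.clear_chunk_download(&hash), // bad or unknown chunk; allow a retry
+ //         }
+ //     }
+ //     assert!(tracker.is_complete());
+ // }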
+ + +use util::{H256, Hashable}; +use std::collections::HashSet; +use ethcore::snapshot::ManifestData; + +#[derive(PartialEq, Eq, Debug)] +pub enum ChunkType { + State(H256), + Block(H256), +} + +pub struct Snapshot { + pending_state_chunks: Vec, + pending_block_chunks: Vec, + downloading_chunks: HashSet, + completed_chunks: HashSet, + snapshot_hash: Option, +} + +impl Snapshot { + /// Create a new instance. + pub fn new() -> Snapshot { + Snapshot { + pending_state_chunks: Vec::new(), + pending_block_chunks: Vec::new(), + downloading_chunks: HashSet::new(), + completed_chunks: HashSet::new(), + snapshot_hash: None, + } + } + + /// Clear everything. + pub fn clear(&mut self) { + self.pending_state_chunks.clear(); + self.pending_block_chunks.clear(); + self.downloading_chunks.clear(); + self.completed_chunks.clear(); + self.snapshot_hash = None; + } + + /// Reset collection for a manifest RLP + pub fn reset_to(&mut self, manifest: &ManifestData, hash: &H256) { + self.clear(); + self.pending_state_chunks = manifest.state_hashes.clone(); + self.pending_block_chunks = manifest.block_hashes.clone(); + self.snapshot_hash = Some(hash.clone()); + } + + /// Validate chunk and mark it as downloaded + pub fn validate_chunk(&mut self, chunk: &[u8]) -> Result { + let hash = chunk.sha3(); + if self.completed_chunks.contains(&hash) { + trace!(target: "sync", "Ignored proccessed chunk: {}", hash.hex()); + return Err(()); + } + self.downloading_chunks.remove(&hash); + if self.pending_block_chunks.iter().any(|h| h == &hash) { + self.completed_chunks.insert(hash.clone()); + return Ok(ChunkType::Block(hash)); + } + if self.pending_state_chunks.iter().any(|h| h == &hash) { + self.completed_chunks.insert(hash.clone()); + return Ok(ChunkType::State(hash)); + } + trace!(target: "sync", "Ignored unknown chunk: {}", hash.hex()); + Err(()) + } + + /// Find a chunk to download + pub fn needed_chunk(&mut self) -> Option { + // check state chunks first + let mut chunk = self.pending_state_chunks.iter() + .find(|&h| !self.downloading_chunks.contains(h) && !self.completed_chunks.contains(h)) + .cloned(); + if chunk.is_none() { + chunk = self.pending_block_chunks.iter() + .find(|&h| !self.downloading_chunks.contains(h) && !self.completed_chunks.contains(h)) + .cloned(); + } + + if let Some(hash) = chunk { + self.downloading_chunks.insert(hash.clone()); + } + chunk + } + + pub fn clear_chunk_download(&mut self, hash: &H256) { + self.downloading_chunks.remove(hash); + } + + pub fn snapshot_hash(&self) -> Option { + self.snapshot_hash + } + + pub fn total_chunks(&self) -> usize { + self.pending_block_chunks.len() + self.pending_state_chunks.len() + } + + pub fn done_chunks(&self) -> usize { + self.total_chunks() - self.completed_chunks.len() + } + + pub fn is_complete(&self) -> bool { + self.total_chunks() == self.completed_chunks.len() + } +} + +#[cfg(test)] +mod test { + use util::*; + use super::*; + use ethcore::snapshot::ManifestData; + + fn is_empty(snapshot: &Snapshot) -> bool { + snapshot.pending_block_chunks.is_empty() && + snapshot.pending_state_chunks.is_empty() && + snapshot.completed_chunks.is_empty() && + snapshot.downloading_chunks.is_empty() && + snapshot.snapshot_hash.is_none() + } + + fn test_manifest() -> (ManifestData, H256, Vec, Vec) { + let state_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); + let block_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); + let manifest = ManifestData { + state_hashes: state_chunks.iter().map(|data| data.sha3()).collect(), + block_hashes: 
block_chunks.iter().map(|data| data.sha3()).collect(), + state_root: H256::new(), + block_number: 42, + block_hash: H256::new(), + }; + let mhash = manifest.clone().into_rlp().sha3(); + (manifest, mhash, state_chunks, block_chunks) + } + + #[test] + fn create_clear() { + let mut snapshot = Snapshot::new(); + assert!(is_empty(&snapshot)); + let (manifest, mhash, _, _,) = test_manifest(); + snapshot.reset_to(&manifest, &mhash); + assert!(!is_empty(&snapshot)); + snapshot.clear(); + assert!(is_empty(&snapshot)); + } + + #[test] + fn validate_chunks() { + let mut snapshot = Snapshot::new(); + let (manifest, mhash, state_chunks, block_chunks) = test_manifest(); + snapshot.reset_to(&manifest, &mhash); + assert!(snapshot.validate_chunk(&H256::random().to_vec()).is_err()); + + let requested: Vec = (0..40).map(|_| snapshot.needed_chunk().unwrap()).collect(); + assert!(snapshot.needed_chunk().is_none()); + assert_eq!(&requested[0..20], &manifest.state_hashes[..]); + assert_eq!(&requested[20..40], &manifest.block_hashes[..]); + assert_eq!(snapshot.downloading_chunks.len(), 40); + + assert_eq!(snapshot.validate_chunk(&state_chunks[4]), Ok(ChunkType::State(manifest.state_hashes[4].clone()))); + assert_eq!(snapshot.completed_chunks.len(), 1); + assert_eq!(snapshot.downloading_chunks.len(), 39); + + assert_eq!(snapshot.validate_chunk(&block_chunks[10]), Ok(ChunkType::Block(manifest.block_hashes[10].clone()))); + assert_eq!(snapshot.completed_chunks.len(), 2); + assert_eq!(snapshot.downloading_chunks.len(), 38); + + for (i, data) in state_chunks.iter().enumerate() { + if i != 4 { + assert!(snapshot.validate_chunk(data).is_ok()); + } + } + + for (i, data) in block_chunks.iter().enumerate() { + if i != 10 { + assert!(snapshot.validate_chunk(data).is_ok()); + } + } + + assert!(snapshot.is_complete()); + assert_eq!(snapshot.snapshot_hash(), Some(manifest.into_rlp().sha3())); + } +} + diff --git a/sync/src/sync_io.rs b/sync/src/sync_io.rs index 91070adc5..fa95941ea 100644 --- a/sync/src/sync_io.rs +++ b/sync/src/sync_io.rs @@ -16,6 +16,8 @@ use network::{NetworkContext, PeerId, PacketId, NetworkError}; use ethcore::client::BlockChainClient; +use ethcore::snapshot::SnapshotService; +use api::ETH_PROTOCOL; /// IO interface for the syning handler. /// Provides peer connection management and an interface to the blockchain client. @@ -31,10 +33,14 @@ pub trait SyncIo { fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec) -> Result<(), NetworkError>; /// Get the blockchain fn chain(&self) -> &BlockChainClient; + /// Get the snapshot service. + fn snapshot_service(&self) -> &SnapshotService; /// Returns peer client identifier string fn peer_info(&self, peer_id: PeerId) -> String { peer_id.to_string() } + /// Maximum mutuallt supported ETH protocol version + fn eth_protocol_version(&self, peer_id: PeerId) -> u8; /// Returns if the chain block queue empty fn is_chain_queue_empty(&self) -> bool { self.chain().queue_info().is_empty() @@ -46,15 +52,17 @@ pub trait SyncIo { /// Wraps `NetworkContext` and the blockchain client pub struct NetSyncIo<'s, 'h> where 'h: 's { network: &'s NetworkContext<'h>, - chain: &'s BlockChainClient + chain: &'s BlockChainClient, + snapshot_service: &'s SnapshotService, } impl<'s, 'h> NetSyncIo<'s, 'h> { /// Creates a new instance from the `NetworkContext` and the blockchain client reference. 
- pub fn new(network: &'s NetworkContext<'h>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> { + pub fn new(network: &'s NetworkContext<'h>, chain: &'s BlockChainClient, snapshot_service: &'s SnapshotService) -> NetSyncIo<'s, 'h> { NetSyncIo { network: network, chain: chain, + snapshot_service: snapshot_service, } } } @@ -80,6 +88,10 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { self.chain } + fn snapshot_service(&self) -> &SnapshotService { + self.snapshot_service + } + fn peer_info(&self, peer_id: PeerId) -> String { self.network.peer_info(peer_id) } @@ -87,6 +99,10 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { fn is_expired(&self) -> bool { self.network.is_expired() } + + fn eth_protocol_version(&self, peer_id: PeerId) -> u8 { + self.network.protocol_version(peer_id, ETH_PROTOCOL).unwrap_or(0) + } } diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index fba57681d..cbed49eff 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -16,22 +16,26 @@ use util::*; use network::*; +use tests::snapshot::*; use ethcore::client::{TestBlockChainClient, BlockChainClient}; use ethcore::header::BlockNumber; +use ethcore::snapshot::SnapshotService; use sync_io::SyncIo; use chain::ChainSync; use ::SyncConfig; pub struct TestIo<'p> { pub chain: &'p mut TestBlockChainClient, + pub snapshot_service: &'p TestSnapshotService, pub queue: &'p mut VecDeque, pub sender: Option, } impl<'p> TestIo<'p> { - pub fn new(chain: &'p mut TestBlockChainClient, queue: &'p mut VecDeque, sender: Option) -> TestIo<'p> { + pub fn new(chain: &'p mut TestBlockChainClient, ss: &'p TestSnapshotService, queue: &'p mut VecDeque, sender: Option) -> TestIo<'p> { TestIo { chain: chain, + snapshot_service: ss, queue: queue, sender: sender } @@ -70,6 +74,14 @@ impl<'p> SyncIo for TestIo<'p> { fn chain(&self) -> &BlockChainClient { self.chain } + + fn snapshot_service(&self) -> &SnapshotService { + self.snapshot_service + } + + fn eth_protocol_version(&self, _peer: PeerId) -> u8 { + 64 + } } pub struct TestPacket { @@ -80,6 +92,7 @@ pub struct TestPacket { pub struct TestPeer { pub chain: TestBlockChainClient, + pub snapshot_service: Arc, pub sync: RwLock, pub queue: VecDeque, } @@ -103,9 +116,11 @@ impl TestNet { let chain = TestBlockChainClient::new(); let mut config = SyncConfig::default(); config.fork_block = fork; + let ss = Arc::new(TestSnapshotService::new()); let sync = ChainSync::new(config, &chain); net.peers.push(TestPeer { sync: RwLock::new(sync), + snapshot_service: ss, chain: chain, queue: VecDeque::new(), }); @@ -126,7 +141,7 @@ impl TestNet { for client in 0..self.peers.len() { if peer != client { let mut p = self.peers.get_mut(peer).unwrap(); - p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(client as PeerId)), client as PeerId); + p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)), client as PeerId); } } } @@ -137,22 +152,22 @@ impl TestNet { if let Some(packet) = self.peers[peer].queue.pop_front() { let mut p = self.peers.get_mut(packet.recipient).unwrap(); trace!("--- {} -> {} ---", peer, packet.recipient); - ChainSync::dispatch_packet(&p.sync, &mut TestIo::new(&mut p.chain, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); + ChainSync::dispatch_packet(&p.sync, &mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); trace!("----------------"); } let mut p = 
self.peers.get_mut(peer).unwrap(); - p.sync.write().maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None)); + p.sync.write().maintain_sync(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, None)); } } pub fn sync_step_peer(&mut self, peer_num: usize) { let mut peer = self.peer_mut(peer_num); - peer.sync.write().maintain_sync(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None)); + peer.sync.write().maintain_sync(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None)); } pub fn restart_peer(&mut self, i: usize) { let peer = self.peer_mut(i); - peer.sync.write().restart(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None)); + peer.sync.write().restart(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None)); } pub fn sync(&mut self) -> u32 { @@ -181,6 +196,6 @@ impl TestNet { pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { let mut peer = self.peer_mut(peer_id); - peer.sync.write().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[], &[]); + peer.sync.write().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None), &[], &[], &[], &[], &[]); } } diff --git a/sync/src/tests/mod.rs b/sync/src/tests/mod.rs index 5afda05f0..bdb4ae4f9 100644 --- a/sync/src/tests/mod.rs +++ b/sync/src/tests/mod.rs @@ -15,5 +15,6 @@ // along with Parity. If not, see . pub mod helpers; +pub mod snapshot; mod chain; mod rpc; diff --git a/sync/src/tests/snapshot.rs b/sync/src/tests/snapshot.rs new file mode 100644 index 000000000..b27602b0d --- /dev/null +++ b/sync/src/tests/snapshot.rs @@ -0,0 +1,123 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
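
The `SyncIo` additions above (a `snapshot_service` accessor and `eth_protocol_version`) give the sync handler enough information to decide whether a peer can be asked for snapshot data at all. A minimal sketch of that gating follows; the packet id constant is hypothetical, and 64 is simply the version the test `TestIo` reports, not a value fixed by this patch.

```rust
use network::{PeerId, PacketId, NetworkError};
use sync_io::SyncIo;

// Hypothetical packet id; the real constant lives in the sync handler.
const GET_SNAPSHOT_MANIFEST_PACKET: PacketId = 0x11;

// Ask a peer for the snapshot manifest, but only if it negotiated a
// protocol version new enough to understand the request.
fn request_manifest(io: &mut SyncIo, peer: PeerId) -> Result<(), NetworkError> {
    if io.eth_protocol_version(peer) < 64 {
        return Ok(()); // peer too old: fall back to regular block sync
    }
    io.send(peer, GET_SNAPSHOT_MANIFEST_PACKET, Vec::new())
}
```
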
+ +use util::*; +use ethcore::snapshot::{SnapshotService, ManifestData, RestorationStatus}; +use ethcore::header::BlockNumber; +use ethcore::client::{EachBlockWith}; +use super::helpers::*; + +pub struct TestSnapshotService { + manifest: Option, + chunks: HashMap, + + restoration_manifest: Mutex>, + state_restoration_chunks: Mutex>, + block_restoration_chunks: Mutex>, +} + +impl TestSnapshotService { + pub fn new() -> TestSnapshotService { + TestSnapshotService { + manifest: None, + chunks: HashMap::new(), + restoration_manifest: Mutex::new(None), + state_restoration_chunks: Mutex::new(HashMap::new()), + block_restoration_chunks: Mutex::new(HashMap::new()), + } + } + + pub fn new_with_snapshot(num_chunks: usize, block_hash: H256, block_number: BlockNumber) -> TestSnapshotService { + let num_state_chunks = num_chunks / 2; + let num_block_chunks = num_chunks - num_state_chunks; + let state_chunks: Vec = (0..num_state_chunks).map(|_| H256::random().to_vec()).collect(); + let block_chunks: Vec = (0..num_block_chunks).map(|_| H256::random().to_vec()).collect(); + let manifest = ManifestData { + state_hashes: state_chunks.iter().map(|data| data.sha3()).collect(), + block_hashes: block_chunks.iter().map(|data| data.sha3()).collect(), + state_root: H256::new(), + block_number: block_number, + block_hash: block_hash, + }; + let mut chunks: HashMap = state_chunks.into_iter().map(|data| (data.sha3(), data)).collect(); + chunks.extend(block_chunks.into_iter().map(|data| (data.sha3(), data))); + TestSnapshotService { + manifest: Some(manifest), + chunks: chunks, + restoration_manifest: Mutex::new(None), + state_restoration_chunks: Mutex::new(HashMap::new()), + block_restoration_chunks: Mutex::new(HashMap::new()), + } + } +} + +impl SnapshotService for TestSnapshotService { + fn manifest(&self) -> Option { + self.manifest.as_ref().cloned() + } + + fn chunk(&self, hash: H256) -> Option { + self.chunks.get(&hash).cloned() + } + + fn status(&self) -> RestorationStatus { + match &*self.restoration_manifest.lock() { + &Some(ref manifest) if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() && + self.block_restoration_chunks.lock().len() == manifest.block_hashes.len() => RestorationStatus::Inactive, + &Some(_) => RestorationStatus::Ongoing { + state_chunks_done: self.state_restoration_chunks.lock().len() as u32, + block_chunks_done: self.block_restoration_chunks.lock().len() as u32, + }, + &None => RestorationStatus::Inactive, + } + } + + fn begin_restore(&self, manifest: ManifestData) { + *self.restoration_manifest.lock() = Some(manifest); + self.state_restoration_chunks.lock().clear(); + self.block_restoration_chunks.lock().clear(); + } + + fn abort_restore(&self) { + *self.restoration_manifest.lock() = None; + self.state_restoration_chunks.lock().clear(); + self.block_restoration_chunks.lock().clear(); + } + + fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { + if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.state_hashes.iter().any(|h| h == &hash)) { + self.state_restoration_chunks.lock().insert(hash, chunk); + } + } + + fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { + if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.block_hashes.iter().any(|h| h == &hash)) { + self.block_restoration_chunks.lock().insert(hash, chunk); + } + } +} + +#[test] +fn snapshot_sync() { + ::env_logger::init().ok(); + let mut net = TestNet::new(2); + net.peer_mut(0).snapshot_service = Arc::new(TestSnapshotService::new_with_snapshot(16, H256::new(), 
1)); + net.peer_mut(0).chain.add_blocks(1, EachBlockWith::Nothing); + net.sync_steps(19); // status + manifest + chunks + assert_eq!(net.peer(1).snapshot_service.state_restoration_chunks.lock().len(), net.peer(0).snapshot_service.manifest.as_ref().unwrap().state_hashes.len()); + assert_eq!(net.peer(1).snapshot_service.block_restoration_chunks.lock().len(), net.peer(0).snapshot_service.manifest.as_ref().unwrap().block_hashes.len()); +} + diff --git a/util/network/src/host.rs b/util/network/src/host.rs index 359f54f1a..ebc10324f 100644 --- a/util/network/src/host.rs +++ b/util/network/src/host.rs @@ -282,6 +282,12 @@ impl<'s> NetworkContext<'s> { } "unknown".to_owned() } + + /// Returns max version for a given protocol. + pub fn protocol_version(&self, peer: PeerId, protocol: &str) -> Option { + let session = self.resolve_session(peer); + session.and_then(|s| s.lock().capability_version(protocol)) + } } /// Shared host information diff --git a/util/network/src/session.rs b/util/network/src/session.rs index 8ebd37090..164248d62 100644 --- a/util/network/src/session.rs +++ b/util/network/src/session.rs @@ -243,6 +243,11 @@ impl Session { self.info.capabilities.iter().any(|c| c.protocol == protocol) } + /// Checks if peer supports given capability + pub fn capability_version(&self, protocol: &str) -> Option { + self.info.capabilities.iter().filter_map(|c| if c.protocol == protocol { Some(c.version) } else { None }).max() + } + /// Register the session socket with the event loop pub fn register_socket>(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), NetworkError> { if self.expired() { diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 5db7801a1..177df5fa0 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -16,9 +16,11 @@ //! Key-Value store abstraction with `RocksDB` backend. +use std::io::ErrorKind; use common::*; use elastic_array::*; use std::default::Default; +use std::path::PathBuf; use rlp::{UntrustedRlp, RlpType, View, Compressible}; use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator, Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column}; @@ -189,12 +191,18 @@ impl<'a> Iterator for DatabaseIterator { } } +struct DBAndColumns { + db: DB, + cfs: Vec, +} + /// Key-Value database. pub struct Database { - db: DB, + db: RwLock>, + config: DatabaseConfig, write_opts: WriteOptions, - cfs: Vec, overlay: RwLock, KeyState>>>, + path: String, } impl Database { @@ -278,11 +286,13 @@ impl Database { }, Err(s) => { return Err(s); } }; + let num_cols = cfs.len(); Ok(Database { - db: db, + db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })), + config: config.clone(), write_opts: write_opts, - overlay: RwLock::new((0..(cfs.len() + 1)).map(|_| HashMap::new()).collect()), - cfs: cfs, + overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), + path: path.to_owned(), }) } @@ -320,94 +330,167 @@ impl Database { /// Commit buffered changes to database. 
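
Wrapping the live handle in `RwLock<Option<DBAndColumns>>` is what lets `restore` further down close the database, swap directories on disk, and reopen it; every accessor rewritten below first checks that the handle is still present and reports "Database is closed" otherwise. A condensed sketch of that shape, using illustrative stand-in types rather than the real `Database`:

```rust
use std::sync::RwLock;

// Stand-in for the rocksdb handle plus its column families.
struct Inner;

struct Store {
    inner: RwLock<Option<Inner>>,
}

impl Store {
    // Normal operations take the read lock and bail out if the DB was closed.
    fn flush(&self) -> Result<(), String> {
        match *self.inner.read().unwrap() {
            Some(ref _db) => Ok(()), // write the batch via the live handle
            None => Err("Database is closed".to_owned()),
        }
    }

    // While closed, concurrent callers observe `None` and return the error
    // above instead of touching a dropped handle.
    fn close(&self) {
        *self.inner.write().unwrap() = None;
    }

    fn reopen(&self, inner: Inner) {
        *self.inner.write().unwrap() = Some(inner);
    }
}
```
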
pub fn flush(&self) -> Result<(), String> { - let batch = WriteBatch::new(); - let mut overlay = self.overlay.write(); + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let batch = WriteBatch::new(); + let mut overlay = self.overlay.write(); - for (c, column) in overlay.iter_mut().enumerate() { - let column_data = mem::replace(column, HashMap::new()); - for (key, state) in column_data.into_iter() { - match state { - KeyState::Delete => { - if c > 0 { - try!(batch.delete_cf(self.cfs[c - 1], &key)); - } else { - try!(batch.delete(&key)); - } - }, - KeyState::Insert(value) => { - if c > 0 { - try!(batch.put_cf(self.cfs[c - 1], &key, &value)); - } else { - try!(batch.put(&key, &value)); - } - }, - KeyState::InsertCompressed(value) => { - let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); - if c > 0 { - try!(batch.put_cf(self.cfs[c - 1], &key, &compressed)); - } else { - try!(batch.put(&key, &value)); + for (c, column) in overlay.iter_mut().enumerate() { + let column_data = mem::replace(column, HashMap::new()); + for (key, state) in column_data.into_iter() { + match state { + KeyState::Delete => { + if c > 0 { + try!(batch.delete_cf(cfs[c - 1], &key)); + } else { + try!(batch.delete(&key)); + } + }, + KeyState::Insert(value) => { + if c > 0 { + try!(batch.put_cf(cfs[c - 1], &key, &value)); + } else { + try!(batch.put(&key, &value)); + } + }, + KeyState::InsertCompressed(value) => { + let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); + if c > 0 { + try!(batch.put_cf(cfs[c - 1], &key, &compressed)); + } else { + try!(batch.put(&key, &value)); + } + } } } } - } + db.write_opt(batch, &self.write_opts) + }, + &None => Err("Database is closed".to_owned()) } - self.db.write_opt(batch, &self.write_opts) } /// Commit transaction to database. pub fn write(&self, tr: DBTransaction) -> Result<(), String> { - let batch = WriteBatch::new(); - let ops = tr.ops; - for op in ops { - match op { - DBOp::Insert { col, key, value } => { - try!(col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(self.cfs[c as usize], &key, &value))) - }, - DBOp::InsertCompressed { col, key, value } => { - let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); - try!(col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(self.cfs[c as usize], &key, &compressed))) - }, - DBOp::Delete { col, key } => { - try!(col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(self.cfs[c as usize], &key))) - }, - } + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let batch = WriteBatch::new(); + let ops = tr.ops; + for op in ops { + match op { + DBOp::Insert { col, key, value } => { + try!(col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))) + }, + DBOp::InsertCompressed { col, key, value } => { + let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); + try!(col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))) + }, + DBOp::Delete { col, key } => { + try!(col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))) + }, + } + } + db.write_opt(batch, &self.write_opts) + }, + &None => Err("Database is closed".to_owned()) } - self.db.write_opt(batch, &self.write_opts) } /// Get value by key. 
pub fn get(&self, col: Option, key: &[u8]) -> Result, String> { - let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; - match overlay.get(key) { - Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - col.map_or_else( - || self.db.get(key).map(|r| r.map(|v| v.to_vec())), - |c| self.db.get_cf(self.cfs[c as usize], key).map(|r| r.map(|v| v.to_vec()))) + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; + match overlay.get(key) { + Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())), + Some(&KeyState::Delete) => Ok(None), + None => { + col.map_or_else( + || db.get(key).map(|r| r.map(|v| v.to_vec())), + |c| db.get_cf(cfs[c as usize], key).map(|r| r.map(|v| v.to_vec()))) + }, + } }, + &None => Ok(None), } } /// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values. - // TODO: support prefix seek for unflushed ata + // TODO: support prefix seek for unflushed data pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - let mut iter = col.map_or_else(|| self.db.iterator(IteratorMode::From(prefix, Direction::Forward)), - |c| self.db.iterator_cf(self.cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap()); - match iter.next() { - // TODO: use prefix_same_as_start read option (not availabele in C API currently) - Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, - _ => None + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + let mut iter = col.map_or_else(|| db.iterator(IteratorMode::From(prefix, Direction::Forward)), + |c| db.iterator_cf(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap()); + match iter.next() { + // TODO: use prefix_same_as_start read option (not availabele in C API currently) + Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, + _ => None + } + }, + &None => None, } } /// Get database iterator for flushed data. pub fn iter(&self, col: Option) -> DatabaseIterator { //TODO: iterate over overlay - col.map_or_else(|| DatabaseIterator { iter: self.db.iterator(IteratorMode::Start) }, - |c| DatabaseIterator { iter: self.db.iterator_cf(self.cfs[c as usize], IteratorMode::Start).unwrap() }) + match &*self.db.read() { + &Some(DBAndColumns { ref db, ref cfs }) => { + col.map_or_else(|| DatabaseIterator { iter: db.iterator(IteratorMode::Start) }, + |c| DatabaseIterator { iter: db.iterator_cf(cfs[c as usize], IteratorMode::Start).unwrap() }) + }, + &None => panic!("Not supported yet") //TODO: return an empty iterator or change return type + } + } + + /// Close the database + fn close(&self) { + *self.db.write() = None; + self.overlay.write().clear(); + } + + /// Restore the database from a copy at given path. + pub fn restore(&self, new_db: &str) -> Result<(), UtilError> { + self.close(); + + let mut backup_db = PathBuf::from(&self.path); + backup_db.pop(); + backup_db.push("backup_db"); + println!("Path at {:?}", self.path); + println!("Backup at {:?}", backup_db); + + let existed = match fs::rename(&self.path, &backup_db) { + Ok(_) => true, + Err(e) => if let ErrorKind::NotFound = e.kind() { + false + } else { + return Err(e.into()); + } + }; + + match fs::rename(&new_db, &self.path) { + Ok(_) => { + // clean up the backup. 
+ if existed { + try!(fs::remove_dir_all(&backup_db)); + } + } + Err(e) => { + // restore the backup. + if existed { + try!(fs::rename(&backup_db, &self.path)); + } + return Err(e.into()) + } + } + + // reopen the database and steal handles into self + let db = try!(Self::open(&self.config, &self.path)); + *self.db.write() = mem::replace(&mut *db.db.write(), None); + *self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new()); + Ok(()) } } From 46581e173da1413bbead7a2a70c86471dbe513ce Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 6 Sep 2016 15:49:44 +0200 Subject: [PATCH 11/17] check block queue size before taking periodic snapshot --- ethcore/src/snapshot/watcher.rs | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index 5a0c3eafc..8f9d3833b 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -26,15 +26,23 @@ use util::hash::H256; use std::sync::Arc; -// helper trait for transforming hashes to numbers. -trait HashToNumber: Send + Sync { +// helper trait for transforming hashes to numbers and checking if syncing. +trait Oracle: Send + Sync { fn to_number(&self, hash: H256) -> Option; + + fn is_major_syncing(&self) -> bool; } -impl HashToNumber for Client { +impl Oracle for Client { fn to_number(&self, hash: H256) -> Option { self.block_header(BlockID::Hash(hash)).map(|h| HeaderView::new(&h).number()) } + + fn is_major_syncing(&self) -> bool { + let queue_info = self.queue_info(); + + queue_info.unverified_queue_size + queue_info.verified_queue_size > 3 + } } // helper trait for broadcasting a block to take a snapshot at. @@ -60,7 +68,7 @@ impl Broadcast for IoChannel { /// A `ChainNotify` implementation which will trigger a snapshot event /// at certain block numbers. 
pub struct Watcher { - oracle: Arc, + oracle: Arc, broadcast: Box, period: u64, history: u64, @@ -90,6 +98,8 @@ impl ChainNotify for Watcher { _: Vec, _duration: u64) { + if self.oracle.is_major_syncing() { return } + trace!(target: "snapshot_watcher", "{} imported", imported.len()); let highest = imported.into_iter() @@ -108,7 +118,7 @@ impl ChainNotify for Watcher { #[cfg(test)] mod tests { - use super::{Broadcast, HashToNumber, Watcher}; + use super::{Broadcast, Oracle, Watcher}; use client::ChainNotify; @@ -119,10 +129,12 @@ mod tests { struct TestOracle(HashMap); - impl HashToNumber for TestOracle { + impl Oracle for TestOracle { fn to_number(&self, hash: H256) -> Option { self.0.get(&hash).cloned() } + + fn is_major_syncing(&self) -> bool { false } } struct TestBroadcast(Option); From f054a7b8d5d5a8320c05495674840cab8e2620c7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 6 Sep 2016 17:44:11 +0200 Subject: [PATCH 12/17] more info on current periodic snapshot --- ethcore/src/service.rs | 10 ++++++++-- ethcore/src/snapshot/mod.rs | 19 +++++++++++++++---- ethcore/src/snapshot/service.rs | 19 ++++++++++++++++--- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 9981bce6e..1f377d0ae 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -147,16 +147,22 @@ struct ClientIoHandler { } const CLIENT_TICK_TIMER: TimerToken = 0; +const SNAPSHOT_TICK_TIMER: TimerToken = 1; + const CLIENT_TICK_MS: u64 = 5000; +const SNAPSHOT_TICK_MS: u64 = 10000; impl IoHandler for ClientIoHandler { fn initialize(&self, io: &IoContext) { io.register_timer(CLIENT_TICK_TIMER, CLIENT_TICK_MS).expect("Error registering client timer"); + io.register_timer(SNAPSHOT_TICK_TIMER, SNAPSHOT_TICK_MS).expect("Error registering snapshot timer"); } fn timeout(&self, _io: &IoContext, timer: TimerToken) { - if timer == CLIENT_TICK_TIMER { - self.client.tick(); + match timer { + CLIENT_TICK_TIMER => self.client.tick(), + SNAPSHOT_TICK_TIMER => self.snapshot.tick(), + _ => warn!("IO service triggered unregistered timer '{}'", timer), } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index be7ac2b64..43622fc51 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -83,17 +83,28 @@ pub struct Progress { } impl Progress { + /// Reset the progress. + pub fn reset(&self) { + self.accounts.store(0, Ordering::Release); + self.blocks.store(0, Ordering::Release); + self.size.store(0, Ordering::Release); + + // atomic fence here to ensure the others are written first? + // logs might very rarely get polluted if not. + self.done.store(false, Ordering::Release); + } + /// Get the number of accounts snapshotted thus far. - pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Relaxed) } + pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Acquire) } /// Get the number of blocks snapshotted thus far. - pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Relaxed) } + pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Acquire) } /// Get the written size of the snapshot in bytes. - pub fn size(&self) -> usize { self.size.load(Ordering::Relaxed) } + pub fn size(&self) -> usize { self.size.load(Ordering::Acquire) } /// Whether the snapshot is complete. 
- pub fn done(&self) -> bool { self.done.load(Ordering::SeqCst) } + pub fn done(&self) -> bool { self.done.load(Ordering::Acquire) } } /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer. diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 729fc851e..ce34a0def 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -191,6 +191,7 @@ pub struct Service { state_chunks: AtomicUsize, block_chunks: AtomicUsize, db_restore: Arc, + progress: super::Progress, } impl Service { @@ -220,6 +221,7 @@ impl Service { state_chunks: AtomicUsize::new(0), block_chunks: AtomicUsize::new(0), db_restore: db_restore, + progress: Default::default(), }; // create the root snapshot dir if it doesn't exist. @@ -297,12 +299,22 @@ impl Service { Ok(()) } + /// Tick the snapshot service. This will log any active snapshot + /// being taken. + pub fn tick(&self) { + if self.progress.done() { return } + + let p = &self.progress; + info!("Snapshot: {} accounts {} blocks {} bytes", p.accounts(), p.blocks(), p.size()); + } + /// Take a snapshot at the block with the given number. /// calling this while a restoration is in progress or vice versa /// will lead to a race condition where the first one to finish will /// have their produced snapshot overwritten. pub fn take_snapshot(&self, client: &Client, num: u64) -> Result<(), Error> { info!("Taking snapshot at #{}", num); + self.progress.reset(); let temp_dir = self.temp_snapshot_dir(); let snapshot_dir = self.snapshot_dir(); @@ -310,11 +322,12 @@ impl Service { let _ = fs::remove_dir_all(&temp_dir); let writer = try!(LooseWriter::new(temp_dir.clone())); - let progress = Default::default(); - // Todo [rob] log progress. let guard = Guard::new(temp_dir.clone()); - try!(client.take_snapshot(writer, BlockID::Number(num), &progress)); + try!(client.take_snapshot(writer, BlockID::Number(num), &self.progress)); + + info!("Finished taking snapshot at #{}", num); + let mut reader = self.reader.write(); // destroy the old snapshot reader. 
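
On the ordering question raised in the `reset` comment above: as long as the `done` flag is the last field written with `Release` and the first one read with `Acquire`, a reader that observes `done == false` also observes the zeroed counters, so an explicit fence should not be needed under the usual release/acquire rules. A condensed sketch of that publish/observe shape, with standalone types rather than the real `Progress`:

```rust
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

struct Counters {
    accounts: AtomicUsize,
    done: AtomicBool,
}

impl Counters {
    fn reset(&self) {
        self.accounts.store(0, Ordering::Release);
        // Written last: an Acquire load that sees `false` here synchronizes
        // with this store, so the zeroing above is visible as well.
        self.done.store(false, Ordering::Release);
    }

    // Reader side, e.g. a periodic logging tick.
    fn report(&self) -> Option<usize> {
        if self.done.load(Ordering::Acquire) {
            return None; // nothing in progress
        }
        Some(self.accounts.load(Ordering::Acquire))
    }
}
```
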
From 61d3d749343feb463662e7a1e57263a32b85de03 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 7 Sep 2016 10:36:18 +0200 Subject: [PATCH 13/17] fixed compiling rpc tests --- rpc/src/v1/tests/eth.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index b7ad5b943..d76ac01d6 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -286,7 +286,7 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{ #[test] fn eth_transaction_count() { let secret = "8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2".into(); - let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC)); + let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC).expect("invalid chain spec")); let address = tester.accounts.insert_account(secret, "").unwrap(); tester.accounts.unlock_account_permanently(address, "".into()).unwrap(); @@ -412,7 +412,7 @@ fn verify_transaction_counts(name: String, chain: BlockChain) { #[test] fn starting_nonce_test() { - let tester = EthTester::from_spec(Spec::load(POSITIVE_NONCE_SPEC)); + let tester = EthTester::from_spec(Spec::load(POSITIVE_NONCE_SPEC).expect("invalid chain spec")); let address = Address::from(10); let sample = tester.handler.handle_request_sync(&(r#" From fca2b1a242cf11f50e67eec2688ef682bcd5f3b1 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 7 Sep 2016 13:49:11 +0200 Subject: [PATCH 14/17] Forward ethstore-cli feature --- Cargo.toml | 1 + ethcore/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index d8e42bf3a..762f7cb4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,7 @@ ipc = ["ethcore/ipc"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] stratum = ["ipc"] +ethstore-cli = ["ethcore/ethstore-cli"] [[bin]] path = "parity/main.rs" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 8acba2266..cd15e53da 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -51,3 +51,4 @@ dev = ["clippy"] default = [] benches = [] ipc = [] +ethstore-cli = ["ethstore/cli"] From e9593e0abbcd8cf6a1c5ec8a7f40ae50820d0a6b Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 7 Sep 2016 13:59:14 +0200 Subject: [PATCH 15/17] ethkey-cli --- Cargo.toml | 1 + ethcore/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 762f7cb4c..112a36312 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,7 @@ ipc = ["ethcore/ipc"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] stratum = ["ipc"] +ethkey-cli = ["ethcore/ethkey-cli"] ethstore-cli = ["ethcore/ethstore-cli"] [[bin]] diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index cd15e53da..fe6a682cb 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -51,4 +51,5 @@ dev = ["clippy"] default = [] benches = [] ipc = [] +ethkey-cli = ["ethkey/cli"] ethstore-cli = ["ethstore/cli"] From 541b14a4abf06d4a9a54869bc09b3a325128e0d8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Sep 2016 15:27:14 +0200 Subject: [PATCH 16/17] periodic snapshot tweaks (#2054) * periodic snapshot tweaks * set SNAPSHOT_HISTORY to 500 --- ethcore/src/snapshot/service.rs | 16 +++++++++++++--- ethcore/src/snapshot/watcher.rs | 29 ++++++++++++++++++++--------- parity/run.rs | 6 ++++-- 3 files changed, 37 insertions(+), 14 deletions(-) diff 
--git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index ce34a0def..4dbbaa1d4 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -21,7 +21,7 @@ use std::io::ErrorKind; use std::fs; use std::path::{Path, PathBuf}; use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, SnapshotService}; use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; @@ -192,6 +192,7 @@ pub struct Service { block_chunks: AtomicUsize, db_restore: Arc, progress: super::Progress, + taking_snapshot: AtomicBool, } impl Service { @@ -222,6 +223,7 @@ impl Service { block_chunks: AtomicUsize::new(0), db_restore: db_restore, progress: Default::default(), + taking_snapshot: AtomicBool::new(false), }; // create the root snapshot dir if it doesn't exist. @@ -302,7 +304,7 @@ impl Service { /// Tick the snapshot service. This will log any active snapshot /// being taken. pub fn tick(&self) { - if self.progress.done() { return } + if self.progress.done() || !self.taking_snapshot.load(Ordering::SeqCst) { return } let p = &self.progress; info!("Snapshot: {} accounts {} blocks {} bytes", p.accounts(), p.blocks(), p.size()); @@ -313,6 +315,11 @@ impl Service { /// will lead to a race condition where the first one to finish will /// have their produced snapshot overwritten. pub fn take_snapshot(&self, client: &Client, num: u64) -> Result<(), Error> { + if self.taking_snapshot.compare_and_swap(false, true, Ordering::SeqCst) { + info!("Skipping snapshot at #{} as another one is currently in-progress.", num); + return Ok(()); + } + info!("Taking snapshot at #{}", num); self.progress.reset(); @@ -324,7 +331,10 @@ impl Service { let writer = try!(LooseWriter::new(temp_dir.clone())); let guard = Guard::new(temp_dir.clone()); - try!(client.take_snapshot(writer, BlockID::Number(num), &self.progress)); + let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress); + + self.taking_snapshot.store(false, Ordering::SeqCst); + try!(res); info!("Finished taking snapshot at #{}", num); diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index 8f9d3833b..65f47efc8 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -33,15 +33,22 @@ trait Oracle: Send + Sync { fn is_major_syncing(&self) -> bool; } -impl Oracle for Client { +struct StandardOracle where F: 'static + Send + Sync + Fn() -> bool { + client: Arc, + sync_status: F, +} + +impl Oracle for StandardOracle + where F: Send + Sync + Fn() -> bool +{ fn to_number(&self, hash: H256) -> Option { - self.block_header(BlockID::Hash(hash)).map(|h| HeaderView::new(&h).number()) + self.client.block_header(BlockID::Hash(hash)).map(|h| HeaderView::new(&h).number()) } fn is_major_syncing(&self) -> bool { - let queue_info = self.queue_info(); + let queue_info = self.client.queue_info(); - queue_info.unverified_queue_size + queue_info.verified_queue_size > 3 + (self.sync_status)() || queue_info.unverified_queue_size + queue_info.verified_queue_size > 3 } } @@ -68,7 +75,7 @@ impl Broadcast for IoChannel { /// A `ChainNotify` implementation which will trigger a snapshot event /// at certain block numbers. 
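
The `StandardOracle` above takes the sync status as a plain `Fn() -> bool` closure rather than a handle to the sync service, presumably so that ethcore does not need to depend on the sync crate directly. The same shape in miniature, as a hedged illustration of the generic-closure pattern rather than the actual `Watcher` internals:

```rust
// A watcher-like type that owns a boolean probe supplied by the caller,
// so it needs no knowledge of where the answer comes from.
struct Gate<F: Send + Sync + Fn() -> bool> {
    is_busy: F,
}

impl<F: Send + Sync + Fn() -> bool> Gate<F> {
    fn new(is_busy: F) -> Self {
        Gate { is_busy: is_busy }
    }

    // Only proceed when the probe says we are not in the middle of a sync.
    fn allowed(&self) -> bool {
        !(self.is_busy)()
    }
}

// e.g. Gate::new(move || sync.status().is_major_syncing())
```
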
pub struct Watcher { - oracle: Arc, + oracle: Box, broadcast: Box, period: u64, history: u64, @@ -78,9 +85,14 @@ impl Watcher { /// Create a new `Watcher` which will trigger a snapshot event /// once every `period` blocks, but only after that block is /// `history` blocks old. - pub fn new(client: Arc, channel: IoChannel, period: u64, history: u64) -> Self { + pub fn new(client: Arc, sync_status: F, channel: IoChannel, period: u64, history: u64) -> Self + where F: 'static + Send + Sync + Fn() -> bool + { Watcher { - oracle: client, + oracle: Box::new(StandardOracle { + client: client, + sync_status: sync_status, + }), broadcast: Box::new(channel), period: period, history: history, @@ -125,7 +137,6 @@ mod tests { use util::{H256, U256}; use std::collections::HashMap; - use std::sync::Arc; struct TestOracle(HashMap); @@ -152,7 +163,7 @@ mod tests { let map = hashes.clone().into_iter().zip(numbers).collect(); let watcher = Watcher { - oracle: Arc::new(TestOracle(map)), + oracle: Box::new(TestOracle(map)), broadcast: Box::new(TestBroadcast(expected)), period: period, history: history, diff --git a/parity/run.rs b/parity/run.rs index aaae33b60..c6571b9b2 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -29,7 +29,7 @@ use ethcore::service::ClientService; use ethcore::account_provider::AccountProvider; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; use ethcore::snapshot; -use ethsync::SyncConfig; +use ethsync::{SyncConfig, SyncProvider}; use informant::Informant; use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; @@ -51,7 +51,7 @@ use url; const SNAPSHOT_PERIOD: u64 = 10000; // how many blocks to wait before starting a periodic snapshot. -const SNAPSHOT_HISTORY: u64 = 1000; +const SNAPSHOT_HISTORY: u64 = 500; #[derive(Debug, PartialEq)] pub struct RunCmd { @@ -263,8 +263,10 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { let _watcher = match cmd.no_periodic_snapshot { true => None, false => { + let sync = sync_provider.clone(); let watcher = Arc::new(snapshot::Watcher::new( service.client(), + move || sync.status().is_major_syncing(), service.io().channel(), SNAPSHOT_PERIOD, SNAPSHOT_HISTORY, From 57d5c35bb6eed2c93b2345f3a28d9577413979a7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Sep 2016 15:27:28 +0200 Subject: [PATCH 17/17] Use proper database configuration in snapshots. 
(#2052) * use proper database config in snapshot service * add snapshot path to parity directories struct * fix RPC tests --- ethcore/src/client/client.rs | 5 +- ethcore/src/json_tests/chain.rs | 4 +- ethcore/src/service.rs | 47 ++++++++++--- ethcore/src/snapshot/service.rs | 119 ++++++++++++++++++-------------- ethcore/src/tests/client.rs | 43 ++++++++++-- ethcore/src/tests/helpers.rs | 24 ++++++- ethcore/src/tests/rpc.rs | 7 +- parity/blockchain.rs | 17 +++-- parity/dir.rs | 17 ++++- parity/run.rs | 11 +-- parity/snapshot.rs | 8 ++- rpc/src/v1/tests/eth.rs | 11 ++- 12 files changed, 218 insertions(+), 95 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index dfb4f3ad3..e1d603082 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -161,13 +161,10 @@ impl Client { path: &Path, miner: Arc, message_channel: IoChannel, + db_config: &DatabaseConfig, ) -> Result, ClientError> { let path = path.to_path_buf(); let gb = spec.genesis_block(); - let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - db_config.cache_size = config.db_cache_size; - db_config.compaction = config.db_compaction.compaction_profile(); - db_config.wal = config.db_wal; let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database))); let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); diff --git a/ethcore/src/json_tests/chain.rs b/ethcore/src/json_tests/chain.rs index 16161e158..93b0cf82c 100644 --- a/ethcore/src/json_tests/chain.rs +++ b/ethcore/src/json_tests/chain.rs @@ -58,12 +58,14 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec { let temp = RandomTempPath::new(); { + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); let client = Client::new( ClientConfig::default(), &spec, temp.as_path(), Arc::new(Miner::with_spec(&spec)), - IoChannel::disconnected() + IoChannel::disconnected(), + &db_config, ).unwrap(); for b in &blockchain.blocks_rlp() { if Block::is_good(&b) { diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 1f377d0ae..a2b483d40 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -23,7 +23,7 @@ use error::*; use client::{Client, ClientConfig, ChainNotify}; use miner::Miner; use snapshot::ManifestData; -use snapshot::service::Service as SnapshotService; +use snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams}; use std::sync::atomic::AtomicBool; #[cfg(feature="ipc")] @@ -60,11 +60,12 @@ pub struct ClientService { } impl ClientService { - /// Start the service in a separate thread. + /// Start the `ClientService`. pub fn start( config: ClientConfig, spec: &Spec, - db_path: &Path, + client_path: &Path, + snapshot_path: &Path, ipc_path: &Path, miner: Arc, ) -> Result @@ -78,11 +79,25 @@ impl ClientService { warn!("Your chain is an alternative fork. 
{}", Colour::Red.bold().paint("TRANSACTIONS MAY BE REPLAYED ON THE MAINNET!")); } - let pruning = config.pruning; - let client = try!(Client::new(config, &spec, db_path, miner, io_service.channel())); - let snapshot = try!(SnapshotService::new(spec, pruning, db_path.into(), io_service.channel(), client.clone())); + let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + db_config.cache_size = config.db_cache_size; + db_config.compaction = config.db_compaction.compaction_profile(); + db_config.wal = config.db_wal; - let snapshot = Arc::new(snapshot); + let pruning = config.pruning; + let client = try!(Client::new(config, &spec, client_path, miner, io_service.channel(), &db_config)); + + let snapshot_params = SnapServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + db_config: db_config, + pruning: pruning, + channel: io_service.channel(), + snapshot_root: snapshot_path.into(), + client_db: client_path.into(), + db_restore: client.clone(), + }; + let snapshot = Arc::new(try!(SnapshotService::new(snapshot_params))); panic_handler.forward_from(&*client); let client_io = Arc::new(ClientIoHandler { @@ -232,15 +247,25 @@ mod tests { #[test] fn it_can_be_started() { let temp_path = RandomTempPath::new(); - let mut path = temp_path.as_path().to_owned(); - path.push("pruning"); - path.push("db"); + let path = temp_path.as_path().to_owned(); + let client_path = { + let mut path = path.to_owned(); + path.push("client"); + path + }; + + let snapshot_path = { + let mut path = path.to_owned(); + path.push("snapshot"); + path + }; let spec = get_test_spec(); let service = ClientService::start( ClientConfig::default(), &spec, - &path, + &client_path, + &snapshot_path, &path, Arc::new(Miner::with_spec(&spec)), ); diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 4dbbaa1d4..2a186378f 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -19,7 +19,7 @@ use std::collections::HashSet; use std::io::ErrorKind; use std::fs; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; @@ -32,7 +32,6 @@ use engines::Engine; use error::Error; use ids::BlockID; use service::ClientIoMessage; -use spec::Spec; use io::IoChannel; @@ -81,6 +80,7 @@ struct RestorationParams<'a> { manifest: ManifestData, // manifest to base restoration on. pruning: Algorithm, // pruning algorithm for the database. db_path: PathBuf, // database path + db_config: &'a DatabaseConfig, writer: LooseWriter, // writer for recovered snapshot. genesis: &'a [u8], // genesis block of the chain. guard: Guard, // guard for the restoration directory. @@ -94,8 +94,7 @@ impl Restoration { let state_chunks = manifest.state_hashes.iter().cloned().collect(); let block_chunks = manifest.block_hashes.iter().cloned().collect(); - let cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let raw_db = Arc::new(try!(Database::open(&cfg, &*params.db_path.to_string_lossy()) + let raw_db = Arc::new(try!(Database::open(params.db_config, &*params.db_path.to_string_lossy()) .map_err(UtilError::SimpleString))); let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone()); @@ -173,15 +172,35 @@ impl Restoration { /// Type alias for client io channel. pub type Channel = IoChannel; -/// Service implementation. 
-/// -/// This will replace the client's state DB as soon as the last state chunk -/// is fed, and will replace the client's blocks DB when the last block chunk -/// is fed. +/// Snapshot service parameters. +pub struct ServiceParams { + /// The consensus engine this is built on. + pub engine: Arc, + /// The chain's genesis block. + pub genesis_block: Bytes, + /// Database configuration options. + pub db_config: DatabaseConfig, + /// State pruning algorithm. + pub pruning: Algorithm, + /// Async IO channel for sending messages. + pub channel: Channel, + /// The directory to put snapshots in. + /// Usually "/snapshot" + pub snapshot_root: PathBuf, + /// The client's database directory. + /// Usually "//db". + pub client_db: PathBuf, + /// A handle for database restoration. + pub db_restore: Arc, +} + +/// `SnapshotService` implementation. +/// This controls taking snapshots and restoring from them. pub struct Service { restoration: Mutex>, - client_db: PathBuf, // "//db" - db_path: PathBuf, // "/" + client_db: PathBuf, + snapshot_root: PathBuf, + db_config: DatabaseConfig, io_channel: Channel, pruning: Algorithm, status: Mutex, @@ -196,38 +215,28 @@ pub struct Service { } impl Service { - /// Create a new snapshot service. - pub fn new(spec: &Spec, pruning: Algorithm, client_db: PathBuf, io_channel: Channel, db_restore: Arc) -> Result { - let db_path = try!(client_db.parent().and_then(Path::parent) - .ok_or_else(|| UtilError::SimpleString("Failed to find database root.".into()))).to_owned(); - - let reader = { - let mut snapshot_path = db_path.clone(); - snapshot_path.push("snapshot"); - snapshot_path.push("current"); - - LooseReader::new(snapshot_path).ok() - }; - - let service = Service { + /// Create a new snapshot service from the given parameters. + pub fn new(params: ServiceParams) -> Result { + let mut service = Service { restoration: Mutex::new(None), - client_db: client_db, - db_path: db_path, - io_channel: io_channel, - pruning: pruning, + client_db: params.client_db, + snapshot_root: params.snapshot_root, + db_config: params.db_config, + io_channel: params.channel, + pruning: params.pruning, status: Mutex::new(RestorationStatus::Inactive), - reader: RwLock::new(reader), - engine: spec.engine.clone(), - genesis_block: spec.genesis_block(), + reader: RwLock::new(None), + engine: params.engine, + genesis_block: params.genesis_block, state_chunks: AtomicUsize::new(0), block_chunks: AtomicUsize::new(0), - db_restore: db_restore, + db_restore: params.db_restore, progress: Default::default(), taking_snapshot: AtomicBool::new(false), }; // create the root snapshot dir if it doesn't exist. - if let Err(e) = fs::create_dir_all(service.root_dir()) { + if let Err(e) = fs::create_dir_all(&service.snapshot_root) { if e.kind() != ErrorKind::AlreadyExists { return Err(e.into()) } @@ -247,33 +256,29 @@ impl Service { } } - Ok(service) - } + let reader = LooseReader::new(service.snapshot_dir()).ok(); + *service.reader.get_mut() = reader; - // get the root path. - fn root_dir(&self) -> PathBuf { - let mut dir = self.db_path.clone(); - dir.push("snapshot"); - dir + Ok(service) } // get the current snapshot dir. fn snapshot_dir(&self) -> PathBuf { - let mut dir = self.root_dir(); + let mut dir = self.snapshot_root.clone(); dir.push("current"); dir } // get the temporary snapshot dir. fn temp_snapshot_dir(&self) -> PathBuf { - let mut dir = self.root_dir(); + let mut dir = self.snapshot_root.clone(); dir.push("in_progress"); dir } // get the restoration directory. 
fn restoration_dir(&self) -> PathBuf { - let mut dir = self.root_dir(); + let mut dir = self.snapshot_root.clone(); dir.push("restoration"); dir } @@ -377,6 +382,7 @@ impl Service { manifest: manifest, pruning: self.pruning, db_path: self.restoration_db(), + db_config: &self.db_config, writer: writer, genesis: &self.genesis_block, guard: Guard::new(rest_dir), @@ -564,19 +570,26 @@ mod tests { #[test] fn sends_async_messages() { let service = IoService::::start().unwrap(); + let spec = get_test_spec(); let dir = RandomTempPath::new(); let mut dir = dir.as_path().to_owned(); - dir.push("pruning"); - dir.push("db"); + let mut client_db = dir.clone(); + dir.push("snapshot"); + client_db.push("client"); - let service = Service::new( - &get_test_spec(), - Algorithm::Archive, - dir, - service.channel(), - Arc::new(NoopDBRestore), - ).unwrap(); + let snapshot_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + db_config: Default::default(), + pruning: Algorithm::Archive, + channel: service.channel(), + snapshot_root: dir, + client_db: client_db, + db_restore: Arc::new(NoopDBRestore), + }; + + let service = Service::new(snapshot_params).unwrap(); assert!(service.manifest().is_none()); assert!(service.chunk(Default::default()).is_none()); diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 99aae1078..ff4e09dc9 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -28,7 +28,16 @@ use rlp::{Rlp, View}; fn imports_from_empty() { let dir = RandomTempPath::new(); let spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); client.import_verified_blocks(); client.flush_queue(); } @@ -37,7 +46,16 @@ fn imports_from_empty() { fn should_return_registrar() { let dir = RandomTempPath::new(); let spec = ethereum::new_morden(); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); assert_eq!(client.additional_params().get("registrar"), Some(&"8e4e9b13d4b45cb0befc93c3061b1408f67316b2".to_owned())); } @@ -55,7 +73,16 @@ fn returns_state_root_basic() { fn imports_good_block() { let dir = RandomTempPath::new(); let spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); let good_block = get_good_dummy_block(); if let Err(_) = client.import_block(good_block) { panic!("error importing block being good by definition"); @@ -71,8 +98,16 @@ fn imports_good_block() { fn query_none_block() { let dir = RandomTempPath::new(); let spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), 
&spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let client = Client::new( + ClientConfig::default(), + &spec, + dir.as_path(), + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); let non_existant = client.block_header(BlockID::Number(188)); assert!(non_existant.is_none()); } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index e05c82c55..c1f99f434 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -133,9 +133,17 @@ pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, pub fn generate_dummy_client_with_spec_and_data(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> GuardedTempResult> where F: Fn()->Spec { let dir = RandomTempPath::new(); - let test_spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &test_spec, dir.as_path(), Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &test_spec, + dir.as_path(), + Arc::new(Miner::with_spec(&test_spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); let test_engine = &*test_spec.engine; let mut db_result = get_temp_journal_db(); @@ -233,7 +241,17 @@ pub fn push_blocks_to_client(client: &Arc, timestamp_salt: u64, starting pub fn get_test_client_with_blocks(blocks: Vec) -> GuardedTempResult> { let dir = RandomTempPath::new(); let test_spec = get_test_spec(); - let client = Client::new(ClientConfig::default(), &test_spec, dir.as_path(), Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected()).unwrap(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let client = Client::new( + ClientConfig::default(), + &test_spec, + dir.as_path(), + Arc::new(Miner::with_spec(&test_spec)), + IoChannel::disconnected(), + &db_config + ).unwrap(); + for block in &blocks { if let Err(_) = client.import_block(block.clone()) { panic!("panic importing block which is well-formed"); diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index afd2cd6a7..202e42988 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -25,18 +25,23 @@ use devtools::*; use miner::Miner; use crossbeam; use io::IoChannel; +use util::kvdb::DatabaseConfig; pub fn run_test_worker(scope: &crossbeam::Scope, stop: Arc, socket_path: &str) { let socket_path = socket_path.to_owned(); scope.spawn(move || { let temp = RandomTempPath::create_dir(); let spec = get_test_spec(); + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let client = Client::new( ClientConfig::default(), &spec, temp.as_path(), Arc::new(Miner::with_spec(&spec)), - IoChannel::disconnected()).unwrap(); + IoChannel::disconnected(), + &db_config + ).unwrap(); let mut worker = nanoipc::Worker::new(&(client as Arc)); worker.add_reqrep(&socket_path).unwrap(); while !stop.load(Ordering::Relaxed) { diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 0dd11b976..ccdf61130 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -19,7 +19,6 @@ use std::{io, fs}; use std::io::{BufReader, BufRead}; use std::time::Duration; use std::thread::sleep; -use std::path::Path; use std::sync::Arc; use rustc_serialize::hex::FromHex; use ethcore_logger::{setup_log, Config as LogConfig}; @@ -125,8 +124,9 @@ fn 
@@ -125,8 +124,9 @@ fn execute_import(cmd: ImportBlockchain) -> Result {
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());

-	// prepare client_path
+	// prepare client and snapshot paths.
 	let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
+	let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());

 	// execute upgrades
 	try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
@@ -138,8 +138,9 @@ fn execute_import(cmd: ImportBlockchain) -> Result {
 	let service = try!(ClientService::start(
 		client_config,
 		&spec,
-		Path::new(&client_path),
-		Path::new(&cmd.dirs.ipc_path()),
+		&client_path,
+		&snapshot_path,
+		&cmd.dirs.ipc_path(),
 		Arc::new(Miner::with_spec(&spec)),
 	).map_err(|e| format!("Client service error: {:?}", e)));
@@ -237,8 +238,9 @@ fn execute_export(cmd: ExportBlockchain) -> Result {
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());

-	// prepare client_path
+	// prepare client and snapshot paths.
 	let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
+	let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());

 	// execute upgrades
 	try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
@@ -249,8 +251,9 @@ fn execute_export(cmd: ExportBlockchain) -> Result {
 	let service = try!(ClientService::start(
 		client_config,
 		&spec,
-		Path::new(&client_path),
-		Path::new(&cmd.dirs.ipc_path()),
+		&client_path,
+		&snapshot_path,
+		&cmd.dirs.ipc_path(),
 		Arc::new(Miner::with_spec(&spec)),
 	).map_err(|e| format!("Client service error: {:?}", e)));
diff --git a/parity/dir.rs b/parity/dir.rs
index f1f230163..d31e81e2c 100644
--- a/parity/dir.rs
+++ b/parity/dir.rs
@@ -52,10 +52,16 @@ impl Directories {
 		Ok(())
 	}

-	/// Get the root path for database
-	pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
+	/// Get the chain's root path.
+	pub fn chain_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
 		let mut dir = Path::new(&self.db).to_path_buf();
 		dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
+		dir
+	}
+
+	/// Get the root path for database
+	pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
+		let mut dir = self.chain_path(genesis_hash, fork_name);
 		dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
 		dir
 	}
@@ -67,6 +73,13 @@ impl Directories {
 		dir
 	}

+	/// Get the path for the snapshot directory given the genesis hash and fork name.
+	pub fn snapshot_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
+		let mut dir = self.chain_path(genesis_hash, fork_name);
+		dir.push("snapshot");
+		dir
+	}
+
 	/// Get the ipc sockets path
 	pub fn ipc_path(&self) -> PathBuf {
 		let mut dir = Path::new(&self.db).to_path_buf();
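
Taken together, the Directories changes above hang both the versioned database directory and the new snapshot directory off a single per-chain root, and the CLI entry points hand both paths straight to ClientService::start. A rough sketch of the resulting layout and the call-site pattern, assuming a `dirs: Directories` value plus the `genesis_hash`, `fork_name`, `algorithm`, `client_config`, `spec` and miner locals already in scope in execute_import/execute (as in the hunks above and below):

	// <dirs.db>/<H64(genesis_hash)>[-<fork_name>]/     <- chain_path()
	//     v<LEGACY_CLIENT_DB_VER_STR>-sec-<pruning>/   <- db_version_path()
	//     snapshot/                                    <- snapshot_path()

	let client_path = dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm);
	let snapshot_path = dirs.snapshot_path(genesis_hash, fork_name.as_ref());

	// Both helpers return PathBuf, so the call sites no longer wrap them in Path::new.
	let service = try!(ClientService::start(
		client_config,
		&spec,
		&client_path,
		&snapshot_path,
		&dirs.ipc_path(),
		Arc::new(Miner::with_spec(&spec)),
	).map_err(|e| format!("Client service error: {:?}", e)));
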
diff --git a/parity/run.rs b/parity/run.rs
index c6571b9b2..720e6f1bf 100644
--- a/parity/run.rs
+++ b/parity/run.rs
@@ -15,7 +15,6 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use std::sync::{Arc, Mutex, Condvar};
-use std::path::Path;
 use std::io::ErrorKind;
 use ctrlc::CtrlC;
 use fdlimit::raise_fd_limit;
@@ -110,8 +109,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref());

-	// prepare client_path
+	// prepare client and snapshot paths.
 	let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm);
+	let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, fork_name.as_ref());

 	// execute upgrades
 	try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
@@ -171,14 +171,15 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	}

 	// create supervisor
-	let mut hypervisor = modules::hypervisor(Path::new(&cmd.dirs.ipc_path()));
+	let mut hypervisor = modules::hypervisor(&cmd.dirs.ipc_path());

 	// create client service.
 	let service = try!(ClientService::start(
 		client_config,
 		&spec,
-		Path::new(&client_path),
-		Path::new(&cmd.dirs.ipc_path()),
+		&client_path,
+		&snapshot_path,
+		&cmd.dirs.ipc_path(),
 		miner.clone(),
 	).map_err(|e| format!("Client service error: {:?}", e)));
diff --git a/parity/snapshot.rs b/parity/snapshot.rs
index 5bf5024ae..8c0bdd8fc 100644
--- a/parity/snapshot.rs
+++ b/parity/snapshot.rs
@@ -82,8 +82,9 @@ impl SnapshotCommand {
 		// select pruning algorithm
 		let algorithm = self.pruning.to_algorithm(&self.dirs, genesis_hash, spec.fork_name.as_ref());

-		// prepare client_path
+		// prepare client and snapshot paths.
 		let client_path = self.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
+		let snapshot_path = self.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());

 		// execute upgrades
 		try!(execute_upgrades(&self.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, self.compaction.compaction_profile()));
@@ -94,8 +95,9 @@ impl SnapshotCommand {
 		let service = try!(ClientService::start(
 			client_config,
 			&spec,
-			Path::new(&client_path),
-			Path::new(&self.dirs.ipc_path()),
+			&client_path,
+			&snapshot_path,
+			&self.dirs.ipc_path(),
 			Arc::new(Miner::with_spec(&spec))
 		).map_err(|e| format!("Client service error: {:?}", e)));
diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs
index d76ac01d6..448fa4734 100644
--- a/rpc/src/v1/tests/eth.rs
+++ b/rpc/src/v1/tests/eth.rs
@@ -108,7 +108,16 @@ impl EthTester {
 		let dir = RandomTempPath::new();
 		let account_provider = account_provider();
 		let miner_service = miner_service(&spec, account_provider.clone());
-		let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), miner_service.clone(), IoChannel::disconnected()).unwrap();
+
+		let db_config = ::util::kvdb::DatabaseConfig::with_columns(::ethcore::db::NUM_COLUMNS);
+		let client = Client::new(
+			ClientConfig::default(),
+			&spec,
+			dir.as_path(),
+			miner_service.clone(),
+			IoChannel::disconnected(),
+			&db_config
+		).unwrap();
 		let sync_provider = sync_provider();
 		let external_miner = Arc::new(ExternalMiner::default());