Merge branch 'master' of github.com:ethcore/parity into move_hash

debris 2016-08-06 00:03:07 +02:00
commit 88c5f555a9
51 changed files with 2027 additions and 322 deletions

Cargo.lock

@@ -259,6 +259,7 @@ dependencies = [
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",


@@ -34,6 +34,7 @@ ethjson = { path = "../json" }
ethcore-ipc = { path = "../ipc/rpc" }
ethstore = { path = "../ethstore" }
ethcore-ipc-nano = { path = "../ipc/nano" }
+rand = "0.3"
[dependencies.hyper]
git = "https://github.com/ethcore/hyper"


@@ -18,7 +18,8 @@
use std::fmt;
use std::collections::HashMap;
-use util::{Address as H160, H256, H520, RwLock};
+use std::time::{Instant, Duration};
+use util::{Address as H160, H256, H520, Mutex, RwLock};
use ethstore::{SecretStore, Error as SSError, SafeAccount, EthStore};
use ethstore::dir::{KeyDirectory};
use ethstore::ethkey::{Address as SSAddress, Message as SSMessage, Secret as SSSecret, Random, Generator};
@@ -32,6 +33,8 @@ enum Unlock {
/// Account unlocked permanently can always sign messages.
/// Use with caution.
Perm,
+/// Account unlocked with a timeout
+Timed((Instant, u32)),
}
/// Data associated with account.
@@ -131,7 +134,7 @@ impl KeyDirectory for NullDir {
/// Account management.
/// Responsible for unlocking accounts.
pub struct AccountProvider {
-unlocked: RwLock<HashMap<SSAddress, AccountData>>,
+unlocked: Mutex<HashMap<SSAddress, AccountData>>,
sstore: Box<SecretStore>,
}
@@ -160,7 +163,7 @@ impl AccountProvider {
/// Creates new account provider.
pub fn new(sstore: Box<SecretStore>) -> Self {
AccountProvider {
-unlocked: RwLock::new(HashMap::new()),
+unlocked: Mutex::new(HashMap::new()),
sstore: sstore,
}
}
@@ -168,7 +171,7 @@ impl AccountProvider {
/// Creates a provider that is not disk backed.
pub fn transient_provider() -> Self {
AccountProvider {
-unlocked: RwLock::new(HashMap::new()),
+unlocked: Mutex::new(HashMap::new()),
sstore: Box::new(EthStore::open(Box::new(NullDir::default())).unwrap())
}
}
@@ -236,21 +239,18 @@ impl AccountProvider {
let _ = try!(self.sstore.sign(&account, &password, &Default::default()));
// check if account is already unlocked permanently, if it is, do nothing
-{
-let unlocked = self.unlocked.read();
+let mut unlocked = self.unlocked.lock();
if let Some(data) = unlocked.get(&account) {
if let Unlock::Perm = data.unlock {
return Ok(())
}
}
-}
let data = AccountData {
unlock: unlock,
password: password,
};
-let mut unlocked = self.unlocked.write();
unlocked.insert(account, data);
Ok(())
}
@@ -265,10 +265,15 @@ impl AccountProvider {
self.unlock_account(account, password, Unlock::Temp)
}
+/// Unlocks account temporarily with a timeout.
+pub fn unlock_account_timed<A>(&self, account: A, password: String, duration_ms: u32) -> Result<(), Error> where Address: From<A> {
+self.unlock_account(account, password, Unlock::Timed((Instant::now(), duration_ms)))
+}
/// Checks if given account is unlocked
pub fn is_unlocked<A>(&self, account: A) -> bool where Address: From<A> {
let account = Address::from(account).into();
-let unlocked = self.unlocked.read();
+let unlocked = self.unlocked.lock();
unlocked.get(&account).is_some()
}
@@ -278,14 +283,19 @@ impl AccountProvider {
let message = Message::from(message).into();
let data = {
-let unlocked = self.unlocked.read();
-try!(unlocked.get(&account).ok_or(Error::NotUnlocked)).clone()
-};
+let mut unlocked = self.unlocked.lock();
+let data = try!(unlocked.get(&account).ok_or(Error::NotUnlocked)).clone();
if let Unlock::Temp = data.unlock {
-let mut unlocked = self.unlocked.write();
unlocked.remove(&account).expect("data exists: so key must exist: qed");
}
+if let Unlock::Timed((ref start, ref duration)) = data.unlock {
+if start.elapsed() > Duration::from_millis(*duration as u64) {
+unlocked.remove(&account).expect("data exists: so key must exist: qed");
+return Err(Error::NotUnlocked);
+}
+}
+data
+};
let signature = try!(self.sstore.sign(&account, &data.password, &message));
Ok(H520(signature.into()))
@@ -304,6 +314,7 @@ impl AccountProvider {
mod tests {
use super::AccountProvider;
use ethstore::ethkey::{Generator, Random};
+use std::time::Duration;
#[test]
fn unlock_account_temp() {
@@ -329,4 +340,16 @@ mod tests {
assert!(ap.sign(kp.address(), [0u8; 32]).is_ok());
assert!(ap.sign(kp.address(), [0u8; 32]).is_ok());
}
+#[test]
+fn unlock_account_timer() {
+let kp = Random.generate().unwrap();
+let ap = AccountProvider::transient_provider();
+assert!(ap.insert_account(kp.secret().clone(), "test").is_ok());
+assert!(ap.unlock_account_timed(kp.address(), "test1".into(), 2000).is_err());
+assert!(ap.unlock_account_timed(kp.address(), "test".into(), 2000).is_ok());
+assert!(ap.sign(kp.address(), [0u8; 32]).is_ok());
+::std::thread::sleep(Duration::from_millis(2000));
+assert!(ap.sign(kp.address(), [0u8; 32]).is_err());
+}
}
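
The RwLock-to-Mutex switch running through this file follows from the new expiry logic: sign() may now remove an expired entry even on what used to be a read-only path, so every access needs exclusive locking anyway. A minimal, self-contained sketch of the timed-unlock pattern (hypothetical Provider type with string keys standing in for addresses; not the crate's API):

    use std::collections::HashMap;
    use std::sync::Mutex;
    use std::time::{Duration, Instant};

    #[derive(Clone)]
    enum Unlock {
        Perm,
        Temp,
        Timed((Instant, u32)), // (time of unlock, timeout in ms), as in the diff
    }

    struct Provider {
        // Mutex rather than RwLock: even the "read" path may remove expired entries.
        unlocked: Mutex<HashMap<String, Unlock>>,
    }

    impl Provider {
        fn can_sign(&self, account: &str) -> bool {
            let mut unlocked = self.unlocked.lock().unwrap();
            match unlocked.get(account).cloned() {
                None => false,
                Some(Unlock::Perm) => true,
                // a temporary unlock is consumed by its first use
                Some(Unlock::Temp) => { unlocked.remove(account); true }
                Some(Unlock::Timed((start, ms))) => {
                    if start.elapsed() > Duration::from_millis(ms as u64) {
                        unlocked.remove(account); // expired: drop the entry
                        false
                    } else {
                        true
                    }
                }
            }
        }
    }

    fn main() {
        let p = Provider { unlocked: Mutex::new(HashMap::new()) };
        p.unlocked.lock().unwrap().insert("a".to_string(), Unlock::Timed((Instant::now(), 50)));
        assert!(p.can_sign("a"));
        std::thread::sleep(Duration::from_millis(60));
        assert!(!p.can_sign("a"));
    }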


@@ -40,7 +40,7 @@ impl Block {
UntrustedRlp::new(b).as_val::<Block>().is_ok()
}
-/// Get the RLP-encoding of the block without the seal.
+/// Get the RLP-encoding of the block with or without the seal.
pub fn rlp_bytes(&self, seal: Seal) -> Bytes {
let mut block_rlp = RlpStream::new_list(3);
self.header.stream_rlp(&mut block_rlp, seal);


@@ -80,7 +80,7 @@ impl BlockQueueInfo {
/// Sorts them ready for blockchain insertion.
pub struct BlockQueue {
panic_handler: Arc<PanicHandler>,
-engine: Arc<Box<Engine>>,
+engine: Arc<Engine>,
more_to_verify: Arc<SCondvar>,
verification: Arc<Verification>,
verifiers: Vec<JoinHandle<()>>,
@@ -140,7 +140,7 @@ struct Verification {
impl BlockQueue {
/// Creates a new queue instance.
-pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<ClientIoMessage>) -> BlockQueue {
+pub fn new(config: BlockQueueConfig, engine: Arc<Engine>, message_channel: IoChannel<ClientIoMessage>) -> BlockQueue {
let verification = Arc::new(Verification {
unverified: Mutex::new(VecDeque::new()),
verified: Mutex::new(VecDeque::new()),
@@ -196,7 +196,7 @@ impl BlockQueue {
}
}
-fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<SCondvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<SCondvar>) {
+fn verify(verification: Arc<Verification>, engine: Arc<Engine>, wait: Arc<SCondvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<SCondvar>) {
while !deleting.load(AtomicOrdering::Acquire) {
{
let mut more_to_verify = verification.more_to_verify.lock().unwrap();
@@ -226,7 +226,7 @@ impl BlockQueue {
};
let block_hash = block.header.hash();
-match verify_block_unordered(block.header, block.bytes, &**engine) {
+match verify_block_unordered(block.header, block.bytes, &*engine) {
Ok(verified) => {
let mut verifying = verification.verifying.lock();
for e in verifying.iter_mut() {
@@ -319,7 +319,7 @@ impl BlockQueue {
}
}
-match verify_block_basic(&header, &bytes, &**self.engine) {
+match verify_block_basic(&header, &bytes, &*self.engine) {
Ok(()) => {
self.processing.write().insert(h.clone());
self.verification.unverified.lock().push_back(UnverifiedBlock { header: header, bytes: bytes });
@@ -340,7 +340,7 @@ impl BlockQueue {
return;
}
let mut verified_lock = self.verification.verified.lock();
-let mut verified = verified_lock.deref_mut();
+let mut verified = &mut *verified_lock;
let mut bad = self.verification.bad.lock();
let mut processing = self.processing.write();
bad.reserve(block_hashes.len());
@@ -460,7 +460,7 @@ mod tests {
fn get_test_queue() -> BlockQueue {
let spec = get_test_spec();
let engine = spec.engine;
-BlockQueue::new(BlockQueueConfig::default(), Arc::new(engine), IoChannel::disconnected())
+BlockQueue::new(BlockQueueConfig::default(), engine, IoChannel::disconnected())
}
#[test]
@@ -468,7 +468,7 @@ mod tests {
// TODO better test
let spec = Spec::new_test();
let engine = spec.engine;
-let _ = BlockQueue::new(BlockQueueConfig::default(), Arc::new(engine), IoChannel::disconnected());
+let _ = BlockQueue::new(BlockQueueConfig::default(), engine, IoChannel::disconnected());
}
#[test]
@@ -531,7 +531,7 @@ mod tests {
let engine = spec.engine;
let mut config = BlockQueueConfig::default();
config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000
-let queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected());
+let queue = BlockQueue::new(config, engine, IoChannel::disconnected());
assert!(!queue.queue_info().is_full());
let mut blocks = get_good_dummy_block_seq(50);
for b in blocks.drain(..) {
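
The recurring Arc<Box<Engine>> → Arc<Engine> change here (and in client.rs and miner.rs below) drops one level of indirection: the Arc itself owns the unsized trait object, so a single deref reaches it (&*engine instead of &**engine) and clones share one allocation instead of two. A sketch of the difference in modern spelling (dyn Engine; TestEngine is a hypothetical stand-in, not Parity's trait):

    use std::sync::Arc;

    trait Engine {
        fn name(&self) -> &'static str;
    }

    struct TestEngine;
    impl Engine for TestEngine {
        fn name(&self) -> &'static str { "test" }
    }

    fn main() {
        // Before: Arc -> Box -> trait object, hence &**engine at call sites.
        let nested: Arc<Box<dyn Engine>> = Arc::new(Box::new(TestEngine));
        let by_ref: &dyn Engine = &**nested;
        assert_eq!(by_ref.name(), "test");

        // After: the Arc holds the trait object directly; &*engine suffices.
        let flat: Arc<dyn Engine> = Arc::new(TestEngine);
        let by_ref: &dyn Engine = &*flat;
        assert_eq!(by_ref.name(), "test");
    }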


@@ -533,6 +533,116 @@ impl BlockChain {
}
}
+/// Inserts a verified, known block from the canonical chain.
+///
+/// Can be performed out-of-order, but care must be taken that the final chain is in a correct state.
+/// This is used by snapshot restoration.
+///
+/// Supply a dummy parent total difficulty when the parent block may not be in the chain.
+/// Returns true if the block is disconnected.
+pub fn insert_snapshot_block(&self, bytes: &[u8], receipts: Vec<Receipt>, parent_td: Option<U256>, is_best: bool) -> bool {
+let block = BlockView::new(bytes);
+let header = block.header_view();
+let hash = header.sha3();
+if self.is_known(&hash) {
+return false;
+}
+assert!(self.pending_best_block.read().is_none());
+let batch = self.db.transaction();
+let block_rlp = UntrustedRlp::new(bytes);
+let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks);
+let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);
+// store block in db
+batch.put(DB_COL_HEADERS, &hash, &compressed_header).unwrap();
+batch.put(DB_COL_BODIES, &hash, &compressed_body).unwrap();
+let maybe_parent = self.block_details(&header.parent_hash());
+if let Some(parent_details) = maybe_parent {
+// parent known to be in chain.
+let info = BlockInfo {
+hash: hash,
+number: header.number(),
+total_difficulty: parent_details.total_difficulty + header.difficulty(),
+location: BlockLocation::CanonChain,
+};
+self.prepare_update(&batch, ExtrasUpdate {
+block_hashes: self.prepare_block_hashes_update(bytes, &info),
+block_details: self.prepare_block_details_update(bytes, &info),
+block_receipts: self.prepare_block_receipts_update(receipts, &info),
+transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
+blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+info: info,
+block: bytes
+}, is_best);
+self.db.write(batch).unwrap();
+false
+} else {
+// parent not in the chain yet. we need the parent difficulty to proceed.
+let d = parent_td
+.expect("parent total difficulty always supplied for first block in chunk. only first block can have missing parent; qed");
+let info = BlockInfo {
+hash: hash,
+number: header.number(),
+total_difficulty: d + header.difficulty(),
+location: BlockLocation::CanonChain,
+};
+let block_details = BlockDetails {
+number: header.number(),
+total_difficulty: info.total_difficulty,
+parent: header.parent_hash(),
+children: Vec::new(),
+};
+let mut update = HashMap::new();
+update.insert(hash, block_details);
+self.prepare_update(&batch, ExtrasUpdate {
+block_hashes: self.prepare_block_hashes_update(bytes, &info),
+block_details: update,
+block_receipts: self.prepare_block_receipts_update(receipts, &info),
+transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
+blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+info: info,
+block: bytes,
+}, is_best);
+self.db.write(batch).unwrap();
+true
+}
+}
+/// Add a child to a given block. Assumes that the block hash is in
+/// the chain and the child's parent is this block.
+///
+/// Used in snapshots to glue the chunks together at the end.
+pub fn add_child(&self, block_hash: H256, child_hash: H256) {
+let mut parent_details = self.block_details(&block_hash)
+.unwrap_or_else(|| panic!("Invalid block hash: {:?}", block_hash));
+let batch = self.db.transaction();
+parent_details.children.push(child_hash);
+let mut update = HashMap::new();
+update.insert(block_hash, parent_details);
+self.note_used(CacheID::BlockDetails(block_hash));
+let mut write_details = self.block_details.write();
+batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update, CacheUpdatePolicy::Overwrite);
+self.db.write(batch).unwrap();
+}
#[cfg_attr(feature="dev", allow(similar_names))]
/// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified.
@@ -572,7 +682,7 @@ impl BlockChain {
blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
info: info.clone(),
block: bytes,
-});
+}, true);
ImportRoute::from(info)
}
@@ -618,7 +728,7 @@ impl BlockChain {
}
/// Prepares extras update.
-fn prepare_update(&self, batch: &DBTransaction, update: ExtrasUpdate) {
+fn prepare_update(&self, batch: &DBTransaction, update: ExtrasUpdate, is_best: bool) {
{
for hash in update.block_details.keys().cloned() {
self.note_used(CacheID::BlockDetails(hash));
@@ -645,7 +755,7 @@ impl BlockChain {
// update best block
match update.info.location {
BlockLocation::Branch => (),
-_ => {
+_ => if is_best {
batch.put(DB_COL_EXTRA, b"best", &update.info.hash).unwrap();
*best_block = Some(BestBlock {
hash: update.info.hash,
@@ -653,9 +763,8 @@ impl BlockChain {
total_difficulty: update.info.total_difficulty,
block: update.block.to_vec(),
});
-},
}
-}
let mut write_hashes = self.pending_block_hashes.write();
let mut write_txs = self.pending_transaction_addresses.write();
@@ -745,6 +854,7 @@ impl BlockChain {
}
/// This function returns modified block details.
+/// Uses the given parent details or attempts to load them from the database.
fn prepare_block_details_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, BlockDetails> {
let block = BlockView::new(block_bytes);
let header = block.header_view();
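
A toy model of insert_snapshot_block's dummy-parent-difficulty rule above, using strings for hashes and u64 for difficulty (all names hypothetical, not the crate's types). Only the first block of a chunk may arrive with its parent missing; the caller then supplies the parent's total difficulty and the insert reports the block as disconnected:

    use std::collections::HashMap;

    struct Chain {
        // hash -> (parent hash, total difficulty)
        details: HashMap<String, (String, u64)>,
    }

    impl Chain {
        /// Returns true if the block is disconnected.
        fn insert(&mut self, hash: &str, parent: &str, difficulty: u64, parent_td: Option<u64>) -> bool {
            match self.details.get(parent).map(|&(_, td)| td) {
                Some(td) => {
                    // parent known: chain the total difficulty normally
                    self.details.insert(hash.to_string(), (parent.to_string(), td + difficulty));
                    false
                }
                None => {
                    // parent not in the chain yet: a dummy parent total
                    // difficulty must be supplied by the caller
                    let td = parent_td.expect("parent td supplied for first block in chunk");
                    self.details.insert(hash.to_string(), (parent.to_string(), td + difficulty));
                    true
                }
            }
        }
    }

    fn main() {
        let mut chain = Chain { details: HashMap::new() };
        assert!(chain.insert("b1", "b0", 10, Some(100))); // disconnected: dummy td used
        assert!(!chain.insert("b2", "b1", 10, None));     // connects to b1
        assert_eq!(chain.details["b2"].1, 120);
    }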


@@ -41,7 +41,7 @@ pub enum ExtrasIndex {
fn with_index(hash: &H256, i: ExtrasIndex) -> H264 {
let mut result = H264::default();
result[0] = i as u8;
-result.deref_mut()[1..].clone_from_slice(hash);
+(*result)[1..].clone_from_slice(hash);
result
}


@@ -14,6 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+//! Blockchain generator for tests.
mod bloom;
mod block;
mod complete;


@@ -26,7 +26,7 @@ mod import_route;
mod update;
#[cfg(test)]
-mod generator;
+pub mod generator;
pub use self::blockchain::{BlockProvider, BlockChain};
pub use self::cache::CacheSize;


@@ -13,7 +13,6 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-
use std::collections::{HashSet, HashMap, VecDeque};
use std::sync::{Arc, Weak};
use std::path::{Path};
@@ -62,6 +61,7 @@ use trace::FlatTransactionTraces;
use evm::Factory as EvmFactory;
use miner::{Miner, MinerService};
use util::TrieFactory;
+use snapshot::{self, io as snapshot_io};
// re-export
pub use types::blockchain_info::BlockChainInfo;
@@ -119,7 +119,7 @@ pub struct Client {
mode: Mode,
chain: Arc<BlockChain>,
tracedb: Arc<TraceDB<BlockChain>>,
-engine: Arc<Box<Engine>>,
+engine: Arc<Engine>,
db: Arc<Database>,
state_db: Mutex<Box<JournalDB>>,
block_queue: BlockQueue,
@@ -139,6 +139,7 @@ pub struct Client {
}
const HISTORY: u64 = 1200;
+// database columns
/// Column for State
pub const DB_COL_STATE: Option<u32> = Some(0);
@@ -164,7 +165,7 @@ impl Client {
/// Create a new client with given spec and DB path and custom verifier.
pub fn new(
config: ClientConfig,
-spec: Spec,
+spec: &Spec,
path: &Path,
miner: Arc<Miner>,
message_channel: IoChannel<ClientIoMessage>,
@@ -191,7 +192,7 @@ impl Client {
warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
}
-let engine = Arc::new(spec.engine);
+let engine = spec.engine.clone();
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone());
let panic_handler = PanicHandler::new_in_arc();
@@ -270,7 +271,7 @@ impl Client {
}
fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<LockedBlock, ()> {
-let engine = &**self.engine;
+let engine = &*self.engine;
let header = &block.header;
// Check the block isn't so old we won't be able to enact it.
@@ -593,6 +594,23 @@ impl Client {
}
}
+/// Take a snapshot.
+pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W) -> Result<(), ::error::Error> {
+let db = self.state_db.lock().boxed_clone();
+let best_block_number = self.chain_info().best_block_number;
+let start_block_number = if best_block_number > 1000 {
+best_block_number - 1000
+} else {
+0
+};
+let start_hash = self.block_hash(BlockID::Number(start_block_number))
+.expect("blocks within HISTORY are always stored.");
+try!(snapshot::take_snapshot(&self.chain, start_hash, db.as_hashdb(), writer));
+Ok(())
+}
fn block_hash(chain: &BlockChain, id: BlockID) -> Option<H256> {
match id {
BlockID::Hash(hash) => Some(hash),
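
The start-block selection in take_snapshot is a clamped subtraction: snapshot from 1000 blocks behind the best block, or from genesis when the chain is shorter. Factored out as a standalone function (the name is ours, not the crate's):

    fn snapshot_start_block(best_block_number: u64) -> u64 {
        // equivalent to best_block_number.saturating_sub(1000)
        if best_block_number > 1000 { best_block_number - 1000 } else { 0 }
    }

    fn main() {
        assert_eq!(snapshot_start_block(5000), 4000);
        assert_eq!(snapshot_start_block(500), 0);
    }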
@@ -665,7 +683,7 @@ impl BlockChainClient for Client {
state.add_balance(&sender, &(needed_balance - balance));
}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
-let mut ret = try!(Executive::new(&mut state, &env_info, &**self.engine, &self.vm_factory).transact(t, options));
+let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, &self.vm_factory).transact(t, options));
// TODO gav move this into Executive.
ret.state_diff = original_state.map(|original| state.diff_from(original));
@@ -697,7 +715,7 @@ impl BlockChainClient for Client {
gas_limit: view.gas_limit(),
};
for t in txs.iter().take(address.index) {
-match Executive::new(&mut state, &env_info, &**self.engine, &self.vm_factory).transact(t, Default::default()) {
+match Executive::new(&mut state, &env_info, &*self.engine, &self.vm_factory).transact(t, Default::default()) {
Ok(x) => { env_info.gas_used = env_info.gas_used + x.gas_used; }
Err(ee) => { return Err(CallError::Execution(ee)) }
}
@@ -705,7 +723,7 @@ impl BlockChainClient for Client {
let t = &txs[address.index];
let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };
-let mut ret = try!(Executive::new(&mut state, &env_info, &**self.engine, &self.vm_factory).transact(t, options));
+let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, &self.vm_factory).transact(t, options));
ret.state_diff = original_state.map(|original| state.diff_from(original));
Ok(ret)
@@ -997,7 +1015,7 @@ impl BlockChainClient for Client {
impl MiningBlockChainClient for Client {
fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
-let engine = &**self.engine;
+let engine = &*self.engine;
let h = self.chain.best_block_hash();
let mut open_block = OpenBlock::new(

@@ -45,7 +45,6 @@ mod traits {
pub mod chain_notify {
//! Chain notify interface
#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
-
include!(concat!(env!("OUT_DIR"), "/chain_notify.rs"));
}


@@ -96,7 +96,7 @@ impl Default for TestBlockChainClient {
impl TestBlockChainClient {
/// Creates new test client.
pub fn new() -> Self {
+let spec = Spec::new_test();
let mut client = TestBlockChainClient {
blocks: RwLock::new(HashMap::new()),
numbers: RwLock::new(HashMap::new()),
@@ -110,8 +110,8 @@ impl TestBlockChainClient {
execution_result: RwLock::new(None),
receipts: RwLock::new(HashMap::new()),
queue_size: AtomicUsize::new(0),
-miner: Arc::new(Miner::with_spec(Spec::new_test())),
-spec: Spec::new_test(),
+miner: Arc::new(Miner::with_spec(&spec)),
+spec: spec,
vm_factory: EvmFactory::new(VMType::Interpreter),
};
client.add_blocks(1, EachBlockWith::Nothing); // add genesis block


@@ -23,6 +23,8 @@ use basic_types::LogBloom;
use client::Error as ClientError;
use ipc::binary::{BinaryConvertError, BinaryConvertable};
use types::block_import_error::BlockImportError;
+use snapshot::Error as SnapshotError;
pub use types::executed::{ExecutionError, CallError};
#[derive(Debug, PartialEq, Clone)]
@@ -234,25 +236,28 @@ pub enum Error {
StdIo(::std::io::Error),
/// Snappy error.
Snappy(::util::snappy::InvalidInput),
+/// Snapshot error.
+Snapshot(SnapshotError),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
-Error::Client(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::Util(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::Io(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::Block(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::Execution(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::Transaction(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::Import(ref err) => f.write_fmt(format_args!("{}", err)),
+Error::Client(ref err) => err.fmt(f),
+Error::Util(ref err) => err.fmt(f),
+Error::Io(ref err) => err.fmt(f),
+Error::Block(ref err) => err.fmt(f),
+Error::Execution(ref err) => err.fmt(f),
+Error::Transaction(ref err) => err.fmt(f),
+Error::Import(ref err) => err.fmt(f),
Error::UnknownEngineName(ref name) =>
f.write_fmt(format_args!("Unknown engine name ({})", name)),
Error::PowHashInvalid => f.write_str("Invalid or out of date PoW hash."),
Error::PowInvalid => f.write_str("Invalid nonce or mishash"),
-Error::Trie(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::StdIo(ref err) => f.write_fmt(format_args!("{}", err)),
-Error::Snappy(ref err) => f.write_fmt(format_args!("{}", err)),
+Error::Trie(ref err) => err.fmt(f),
+Error::StdIo(ref err) => err.fmt(f),
+Error::Snappy(ref err) => err.fmt(f),
+Error::Snapshot(ref err) => err.fmt(f),
}
}
}
@@ -329,12 +334,6 @@ impl From<::std::io::Error> for Error {
}
}
-impl From<::util::snappy::InvalidInput> for Error {
-fn from(err: ::util::snappy::InvalidInput) -> Error {
-Error::Snappy(err)
-}
-}
impl From<BlockImportError> for Error {
fn from(err: BlockImportError) -> Error {
match err {
@@ -345,6 +344,23 @@ impl From<BlockImportError> for Error {
}
}
+impl From<snappy::InvalidInput> for Error {
+fn from(err: snappy::InvalidInput) -> Error {
+Error::Snappy(err)
+}
+}
+impl From<SnapshotError> for Error {
+fn from(err: SnapshotError) -> Error {
+match err {
+SnapshotError::Io(err) => Error::StdIo(err),
+SnapshotError::Trie(err) => Error::Trie(err),
+SnapshotError::Decoder(err) => err.into(),
+other => Error::Snapshot(other),
+}
+}
+}
impl<E> From<Box<E>> for Error where Error: From<E> {
fn from(err: Box<E>) -> Error {
Error::from(*err)

@@ -332,7 +332,7 @@ mod tests {
struct TestSetup {
state: GuardedTempResult<State>,
-engine: Box<Engine>,
+engine: Arc<Engine>,
sub_state: Substate,
env_info: EnvInfo
}


@@ -22,7 +22,6 @@ use tests::helpers::*;
use devtools::*;
use spec::Genesis;
use ethjson;
-use ethjson::blockchain::BlockChain;
use miner::Miner;
use io::IoChannel;
@@ -43,7 +42,7 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec<String> {
flush!(" - {}...", name);
-let spec = |blockchain: &BlockChain| {
+let spec = {
let genesis = Genesis::from(blockchain.genesis());
let state = From::from(blockchain.pre_state.clone());
let mut spec = match era {
@@ -61,9 +60,9 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec<String> {
{
let client = Client::new(
ClientConfig::default(),
-spec(&blockchain),
+&spec,
temp.as_path(),
-Arc::new(Miner::with_spec(spec(&blockchain))),
+Arc::new(Miner::with_spec(&spec)),
IoChannel::disconnected()
).unwrap();
for b in &blockchain.blocks_rlp() {


@@ -96,6 +96,7 @@ pub extern crate ethstore;
extern crate semver;
extern crate ethcore_ipc_nano as nanoipc;
extern crate ethcore_devtools as devtools;
+extern crate rand;
extern crate bit_set;
#[cfg(feature = "jit" )] extern crate evmjit;


@@ -177,7 +177,7 @@ pub struct Miner {
gas_range_target: RwLock<(U256, U256)>,
author: RwLock<Address>,
extra_data: RwLock<Bytes>,
-spec: Spec,
+engine: Arc<Engine>,
accounts: Option<Arc<AccountProvider>>,
work_poster: Option<WorkPoster>,
@@ -186,7 +186,7 @@ pub struct Miner {
impl Miner {
/// Creates new instance of miner without accounts, but with given spec.
-pub fn with_spec(spec: Spec) -> Miner {
+pub fn with_spec(spec: &Spec) -> Miner {
Miner {
transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())),
options: Default::default(),
@@ -197,14 +197,14 @@ impl Miner {
author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()),
accounts: None,
-spec: spec,
+engine: spec.engine.clone(),
work_poster: None,
gas_pricer: Mutex::new(GasPricer::new_fixed(20_000_000_000u64.into())),
}
}
/// Creates new instance of miner
-pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
+pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
let work_poster = if !options.new_work_notify.is_empty() { Some(WorkPoster::new(&options.new_work_notify)) } else { None };
let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit)));
Arc::new(Miner {
@@ -217,16 +217,12 @@ impl Miner {
extra_data: RwLock::new(Vec::new()),
options: options,
accounts: accounts,
-spec: spec,
+engine: spec.engine.clone(),
work_poster: work_poster,
gas_pricer: Mutex::new(gas_pricer),
})
}
-fn engine(&self) -> &Engine {
-self.spec.engine.deref()
-}
fn forced_sealing(&self) -> bool {
self.options.force_sealing || !self.options.new_work_notify.is_empty()
}
@@ -274,8 +270,7 @@ impl Miner {
Some(old_block) => {
trace!(target: "miner", "Already have previous work; updating and returning");
// add transactions to old_block
-let e = self.engine();
-old_block.reopen(e, chain.vm_factory())
+old_block.reopen(&*self.engine, chain.vm_factory())
}
None => {
// block not found - create it.
@@ -338,13 +333,13 @@ impl Miner {
if !block.transactions().is_empty() {
trace!(target: "miner", "prepare_sealing: block has transaction - attempting internal seal.");
// block with transactions - see if we can seal immediately.
-let s = self.engine().generate_seal(block.block(), match self.accounts {
+let s = self.engine.generate_seal(block.block(), match self.accounts {
Some(ref x) => Some(&**x),
None => None,
});
if let Some(seal) = s {
trace!(target: "miner", "prepare_sealing: managed internal seal. importing...");
-if let Ok(sealed) = block.lock().try_seal(self.engine(), seal) {
+if let Ok(sealed) = block.lock().try_seal(&*self.engine, seal) {
if let Ok(_) = chain.import_block(sealed.rlp_bytes()) {
trace!(target: "miner", "prepare_sealing: sealed internally and imported. leaving.");
} else {
@@ -497,7 +492,7 @@ impl MinerService for Miner {
state.add_balance(&sender, &(needed_balance - balance));
}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
-let mut ret = try!(Executive::new(&mut state, &env_info, self.engine(), chain.vm_factory()).transact(t, options));
+let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, chain.vm_factory()).transact(t, options));
// TODO gav move this into Executive.
ret.state_diff = original_state.map(|original| state.diff_from(original));
@@ -795,7 +790,7 @@ impl MinerService for Miner {
fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
let result = if let Some(b) = self.sealing_work.lock().queue.get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) {
-b.lock().try_seal(self.engine(), seal).or_else(|_| {
+b.lock().try_seal(&*self.engine, seal).or_else(|_| {
warn!(target: "miner", "Mined solution rejected: Invalid.");
Err(Error::PowInvalid)
})
@@ -897,7 +892,7 @@ mod tests {
fn should_prepare_block_to_seal() {
// given
let client = TestBlockChainClient::default();
-let miner = Miner::with_spec(Spec::new_test());
+let miner = Miner::with_spec(&Spec::new_test());
// when
let sealing_work = miner.map_sealing_work(&client, |_| ());
@@ -908,7 +903,7 @@ mod tests {
fn should_still_work_after_a_couple_of_blocks() {
// given
let client = TestBlockChainClient::default();
-let miner = Miner::with_spec(Spec::new_test());
+let miner = Miner::with_spec(&Spec::new_test());
let res = miner.map_sealing_work(&client, |b| b.block().fields().header.hash());
assert!(res.is_some());
@@ -940,7 +935,7 @@ mod tests {
enable_resubmission: true,
},
GasPricer::new_fixed(0u64.into()),
-Spec::new_test(),
+&Spec::new_test(),
None, // accounts provider
)).ok().expect("Miner was just created.")
}


@@ -32,12 +32,12 @@
//! use ethcore::miner::{Miner, MinerService};
//!
//! fn main() {
-//! let miner: Miner = Miner::with_spec(ethereum::new_frontier());
+//! let miner: Miner = Miner::with_spec(&ethereum::new_frontier());
//! // get status
//! assert_eq!(miner.status().transactions_in_pending_queue, 0);
//!
//! // Check block for sealing
-//! //assert!(miner.sealing_block(client.deref()).lock().is_some());
+//! //assert!(miner.sealing_block(&*client).lock().is_some());
//! }
//! ```


@@ -22,6 +22,7 @@ use spec::Spec;
use error::*;
use client::{Client, ClientConfig, ChainNotify};
use miner::Miner;
+use snapshot::service::Service as SnapshotService;
use std::sync::atomic::AtomicBool;
#[cfg(feature="ipc")]
#[cfg(feature="ipc")] #[cfg(feature="ipc")]
@@ -38,12 +39,17 @@ pub enum ClientIoMessage {
BlockVerified,
/// New transaction RLPs are ready to be imported
NewTransactions(Vec<Bytes>),
+/// Feed a state chunk to the snapshot service
+FeedStateChunk(H256, Bytes),
+/// Feed a block chunk to the snapshot service
+FeedBlockChunk(H256, Bytes),
}
/// Client service setup. Creates and registers client and network services with the IO subsystem.
pub struct ClientService {
io_service: Arc<IoService<ClientIoMessage>>,
client: Arc<Client>,
+snapshot: Arc<SnapshotService>,
panic_handler: Arc<PanicHandler>,
_stop_guard: ::devtools::StopGuard,
}
@@ -52,7 +58,7 @@ impl ClientService {
/// Start the service in a separate thread.
pub fn start(
config: ClientConfig,
-spec: Spec,
+spec: &Spec,
db_path: &Path,
miner: Arc<Miner>,
) -> Result<ClientService, Error>
@@ -65,10 +71,17 @@ impl ClientService {
if spec.fork_name.is_some() {
warn!("Your chain is an alternative fork. {}", Colour::Red.bold().paint("TRANSACTIONS MAY BE REPLAYED ON THE MAINNET!"));
}
-let client = try!(Client::new(config, spec, db_path, miner, io_service.channel()));
-panic_handler.forward_from(client.deref());
+let pruning = config.pruning;
+let client = try!(Client::new(config, &spec, db_path, miner, io_service.channel()));
+let snapshot = try!(SnapshotService::new(spec, pruning, db_path.into(), io_service.channel()));
+let snapshot = Arc::new(snapshot);
+panic_handler.forward_from(&*client);
let client_io = Arc::new(ClientIoHandler {
-client: client.clone()
+client: client.clone(),
+snapshot: snapshot.clone(),
});
try!(io_service.register_handler(client_io));
@@ -78,6 +91,7 @@ impl ClientService {
Ok(ClientService {
io_service: Arc::new(io_service),
client: client,
+snapshot: snapshot,
panic_handler: panic_handler,
_stop_guard: stop_guard,
})
@@ -98,6 +112,11 @@ impl ClientService {
self.client.clone()
}
+/// Get snapshot interface.
+pub fn snapshot_service(&self) -> Arc<SnapshotService> {
+self.snapshot.clone()
+}
/// Get network service component
pub fn io(&self) -> Arc<IoService<ClientIoMessage>> {
self.io_service.clone()
@@ -117,7 +136,8 @@ impl MayPanic for ClientService {
/// IO interface for the Client handler
struct ClientIoHandler {
-client: Arc<Client>
+client: Arc<Client>,
+snapshot: Arc<SnapshotService>,
}
const CLIENT_TICK_TIMER: TimerToken = 0;
@@ -139,6 +159,8 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
match *net_message {
ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); }
ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); }
+ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => self.snapshot.feed_state_chunk(*hash, chunk),
+ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => self.snapshot.feed_block_chunk(*hash, chunk),
_ => {} // ignore other messages
}
}
@@ -172,11 +194,16 @@ mod tests {
#[test]
fn it_can_be_started() {
let temp_path = RandomTempPath::new();
+let mut path = temp_path.as_path().to_owned();
+path.push("pruning");
+path.push("db");
+let spec = get_test_spec();
let service = ClientService::start(
ClientConfig::default(),
-get_test_spec(),
-temp_path.as_path(),
-Arc::new(Miner::with_spec(get_test_spec())),
+&spec,
+&path,
+Arc::new(Miner::with_spec(&spec)),
);
assert!(service.is_ok());
}


@@ -17,9 +17,9 @@
//! Account state encoding and decoding
use account_db::{AccountDB, AccountDBMut};
-use error::Error;
use util::{U256, FixedHash, H256, Bytes, HashDB, SHA3_EMPTY, TrieDB};
use util::rlp::{Rlp, RlpStream, Stream, UntrustedRlp, View};
+use snapshot::Error;
// An alternate account structure from ::account::Account.
#[derive(PartialEq, Clone, Debug)]
@@ -130,39 +130,25 @@ impl Account {
code_hash: code_hash,
})
}
+#[cfg(test)]
+pub fn storage_root_mut(&mut self) -> &mut H256 {
+&mut self.storage_root
+}
}
#[cfg(test)]
mod tests {
use account_db::{AccountDB, AccountDBMut};
use tests::helpers::get_temp_journal_db;
+use snapshot::tests::helpers::fill_storage;
use util::{SHA3_NULL_RLP, SHA3_EMPTY};
use util::hash::{Address, FixedHash, H256};
use util::rlp::{UntrustedRlp, View};
-use util::trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode};
use super::Account;
-fn fill_storage(mut db: AccountDBMut) -> H256 {
-let map = StandardMap {
-alphabet: Alphabet::All,
-min_key: 6,
-journal_key: 6,
-value_mode: ValueMode::Random,
-count: 100
-};
-let mut root = H256::new();
-{
-let mut trie = SecTrieDBMut::new(&mut db, &mut root);
-for (k, v) in map.make() {
-trie.insert(&k, &v).unwrap();
-}
-}
-root
-}
#[test]
fn encoding_basic() {
let mut db = get_temp_journal_db();
@@ -190,12 +176,16 @@ mod tests {
let mut db = &mut **db;
let addr = Address::random();
-let root = fill_storage(AccountDBMut::new(db.as_hashdb_mut(), &addr));
-let account = Account {
+let account = {
+let acct_db = AccountDBMut::new(db.as_hashdb_mut(), &addr);
+let mut root = SHA3_NULL_RLP;
+fill_storage(acct_db, &mut root, &mut H256::zero());
+Account {
nonce: 25.into(),
balance: 987654321.into(),
storage_root: root,
code_hash: SHA3_EMPTY,
+}
};
let thin_rlp = account.to_thin_rlp();


@@ -16,9 +16,6 @@
//! Block RLP compression.
-// TODO [rob] remove when BlockRebuilder done.
-#![allow(dead_code)]
use block::Block;
use header::Header;
@@ -50,10 +47,9 @@ impl AbridgedBlock {
/// producing new rlp.
pub fn from_block_view(block_view: &BlockView) -> Self {
let header = block_view.header_view();
-
let seal_fields = header.seal();
-// 10 header fields, unknown amount of seal fields, and 2 block fields.
+// 10 header fields, unknown number of seal fields, and 2 block fields.
let mut stream = RlpStream::new_list(
HEADER_FIELDS +
seal_fields.len() +
@@ -110,25 +106,17 @@ impl AbridgedBlock {
let transactions = try!(rlp.val_at(10));
let uncles: Vec<Header> = try!(rlp.val_at(11));
-// iterator-based approach is cleaner but doesn't work w/ try.
-let seal = {
-let mut seal = Vec::new();
-for i in 12..rlp.item_count() {
-seal.push(try!(rlp.val_at(i)));
-}
-seal
-};
-header.set_seal(seal);
-let uncle_bytes = uncles.iter()
-.fold(RlpStream::new_list(uncles.len()), |mut s, u| {
-s.append_raw(&u.rlp(::basic_types::Seal::With), 1);
-s
-}).out();
-header.uncles_hash = uncle_bytes.sha3();
+let mut uncles_rlp = RlpStream::new();
+uncles_rlp.append(&uncles);
+header.uncles_hash = uncles_rlp.as_raw().sha3();
+let mut seal_fields = Vec::new();
+for i in (HEADER_FIELDS + BLOCK_FIELDS)..rlp.item_count() {
+let seal_rlp = try!(rlp.at(i));
+seal_fields.push(seal_rlp.as_raw().to_owned());
+}
+header.set_seal(seal_fields);
Ok(Block {
header: header,
@@ -145,17 +133,10 @@ mod tests {
use super::AbridgedBlock;
use types::transaction::{Action, Transaction};
-use util::{Address, H256, FixedHash, U256};
-use util::{Bytes, RlpStream, Stream};
+use util::{Address, H256, FixedHash, U256, Bytes};
fn encode_block(b: &Block) -> Bytes {
-let mut s = RlpStream::new_list(3);
-b.header.stream_rlp(&mut s, ::basic_types::Seal::With);
-s.append(&b.transactions);
-s.append(&b.uncles);
-s.out()
+b.rlp_bytes(::basic_types::Seal::With)
}
#[test]

@@ -0,0 +1,68 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot-related errors.
use std::fmt;
use util::H256;
use util::trie::TrieError;
use util::rlp::DecoderError;
/// Snapshot-related errors.
#[derive(Debug)]
pub enum Error {
/// Invalid starting block for snapshot.
InvalidStartingBlock(H256),
/// Block not found.
BlockNotFound(H256),
/// Trie error.
Trie(TrieError),
/// Decoder error.
Decoder(DecoderError),
/// Io error.
Io(::std::io::Error),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::InvalidStartingBlock(ref hash) => write!(f, "Invalid starting block hash: {}", hash),
Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash),
Error::Io(ref err) => err.fmt(f),
Error::Decoder(ref err) => err.fmt(f),
Error::Trie(ref err) => err.fmt(f),
}
}
}
impl From<::std::io::Error> for Error {
fn from(err: ::std::io::Error) -> Self {
Error::Io(err)
}
}
impl From<Box<TrieError>> for Error {
fn from(err: Box<TrieError>) -> Self {
Error::Trie(*err)
}
}
impl From<DecoderError> for Error {
fn from(err: DecoderError) -> Self {
Error::Decoder(err)
}
}

ethcore/src/snapshot/io.rs (new file)

@@ -0,0 +1,343 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot i/o.
//! Ways of writing and reading snapshots. This module supports writing and reading
//! snapshots of two different formats: packed and loose.
//! Packed snapshots are written to a single file, and loose snapshots are
//! written to multiple files in one directory.
use std::collections::HashMap;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use util::Bytes;
use util::hash::H256;
use util::rlp::{self, Encodable, RlpStream, UntrustedRlp, Stream, View};
use super::ManifestData;
/// Something which can write snapshots.
/// Writing the same chunk multiple times will lead to implementation-defined
/// behavior, and is not advised.
pub trait SnapshotWriter {
/// Write a compressed state chunk.
fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()>;
/// Write a compressed block chunk.
fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()>;
/// Complete writing. The manifest's chunk lists must be consistent
/// with the chunks written.
fn finish(self, manifest: ManifestData) -> io::Result<()> where Self: Sized;
}
// (hash, len, offset)
struct ChunkInfo(H256, u64, u64);
impl Encodable for ChunkInfo {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(3);
s.append(&self.0).append(&self.1).append(&self.2);
}
}
impl rlp::Decodable for ChunkInfo {
fn decode<D: rlp::Decoder>(decoder: &D) -> Result<Self, rlp::DecoderError> {
let d = decoder.as_rlp();
let hash = try!(d.val_at(0));
let len = try!(d.val_at(1));
let off = try!(d.val_at(2));
Ok(ChunkInfo(hash, len, off))
}
}
/// A packed snapshot writer. This writes snapshots to a single concatenated file.
///
/// The file format is very simple and consists of three parts:
/// [Concatenated chunk data]
/// [manifest as RLP]
/// [manifest start offset (8 bytes little-endian)]
///
/// The manifest contains all the same information as a standard `ManifestData`,
/// but also maps chunk hashes to their lengths and offsets in the file
/// for easy reading.
pub struct PackedWriter {
file: File,
state_hashes: Vec<ChunkInfo>,
block_hashes: Vec<ChunkInfo>,
cur_len: u64,
}
impl PackedWriter {
/// Create a new "PackedWriter", to write into the file at the given path.
pub fn new(path: &Path) -> io::Result<Self> {
Ok(PackedWriter {
file: try!(File::create(path)),
state_hashes: Vec::new(),
block_hashes: Vec::new(),
cur_len: 0,
})
}
}
impl SnapshotWriter for PackedWriter {
fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> {
try!(self.file.write_all(chunk));
let len = chunk.len() as u64;
self.state_hashes.push(ChunkInfo(hash, len, self.cur_len));
self.cur_len += len;
Ok(())
}
fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> {
try!(self.file.write_all(chunk));
let len = chunk.len() as u64;
self.block_hashes.push(ChunkInfo(hash, len, self.cur_len));
self.cur_len += len;
Ok(())
}
fn finish(mut self, manifest: ManifestData) -> io::Result<()> {
// we ignore the hashes fields of the manifest under the assumption that
// they are consistent with ours.
let mut stream = RlpStream::new_list(5);
stream
.append(&self.state_hashes)
.append(&self.block_hashes)
.append(&manifest.state_root)
.append(&manifest.block_number)
.append(&manifest.block_hash);
let manifest_rlp = stream.out();
try!(self.file.write_all(&manifest_rlp));
let off = self.cur_len;
trace!(target: "snapshot_io", "writing manifest of len {} to offset {}", manifest_rlp.len(), off);
let off_bytes: [u8; 8] =
[
off as u8,
(off >> 8) as u8,
(off >> 16) as u8,
(off >> 24) as u8,
(off >> 32) as u8,
(off >> 40) as u8,
(off >> 48) as u8,
(off >> 56) as u8,
];
try!(self.file.write_all(&off_bytes[..]));
Ok(())
}
}
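The eight-byte trailer is what makes the file self-describing: a reader seeks to the end, decodes the little-endian offset, and knows the manifest occupies file_len - offset - 8 bytes before it. A small sketch of the trailer decode; the `read_manifest_offset` helper is hypothetical, and `PackedReader::new` below does the same work inline:

use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom};

// Hypothetical helper mirroring PackedReader::new's trailer handling.
fn read_manifest_offset(file: &mut File) -> io::Result<u64> {
    try!(file.seek(SeekFrom::End(-8)));
    let mut raw = [0u8; 8];
    try!(file.read_exact(&mut raw));
    // little-endian: byte 0 is least significant.
    Ok(raw.iter().rev().fold(0u64, |acc, &b| (acc << 8) | b as u64))
}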
/// A "loose" writer writes chunk files into a directory.
pub struct LooseWriter {
dir: PathBuf,
}
impl LooseWriter {
/// Create a new LooseWriter which will write into the given directory,
/// creating it if it doesn't exist.
pub fn new(path: PathBuf) -> io::Result<Self> {
try!(fs::create_dir_all(&path));
Ok(LooseWriter {
dir: path,
})
}
// writing logic is the same for both kinds of chunks.
fn write_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> {
let mut file_path = self.dir.clone();
file_path.push(hash.hex());
let mut file = try!(File::create(file_path));
try!(file.write_all(chunk));
Ok(())
}
}
impl SnapshotWriter for LooseWriter {
fn write_state_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> {
self.write_chunk(hash, chunk)
}
fn write_block_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> {
self.write_chunk(hash, chunk)
}
fn finish(self, manifest: ManifestData) -> io::Result<()> {
let rlp = manifest.into_rlp();
let mut path = self.dir.clone();
path.push("MANIFEST");
let mut file = try!(File::create(path));
try!(file.write_all(&rlp[..]));
Ok(())
}
}
/// Something which can read compressed snapshots.
pub trait SnapshotReader {
/// Get the manifest data for this snapshot.
fn manifest(&self) -> &ManifestData;
/// Get raw chunk data by hash. Behavior is implementation-defined
/// if a chunk not in the manifest is requested.
fn chunk(&self, hash: H256) -> io::Result<Bytes>;
}
/// Packed snapshot reader.
pub struct PackedReader {
file: File,
state_hashes: HashMap<H256, (u64, u64)>, // len, offset
block_hashes: HashMap<H256, (u64, u64)>, // len, offset
manifest: ManifestData,
}
impl PackedReader {
/// Create a new `PackedReader` for the file at the given path.
/// This will fail if any io errors are encountered or the file
/// is not a valid packed snapshot.
pub fn new(path: &Path) -> Result<Option<Self>, ::error::Error> {
let mut file = try!(File::open(path));
let file_len = try!(file.metadata()).len();
if file_len < 8 {
// ensure we don't seek before beginning.
return Ok(None);
}
try!(file.seek(SeekFrom::End(-8)));
let mut off_bytes = [0u8; 8];
try!(file.read_exact(&mut off_bytes[..]));
let manifest_off: u64 =
((off_bytes[7] as u64) << 56) +
((off_bytes[6] as u64) << 48) +
((off_bytes[5] as u64) << 40) +
((off_bytes[4] as u64) << 32) +
((off_bytes[3] as u64) << 24) +
((off_bytes[2] as u64) << 16) +
((off_bytes[1] as u64) << 8) +
(off_bytes[0] as u64);
let manifest_len = file_len - manifest_off - 8;
trace!(target: "snapshot", "loading manifest of length {} from offset {}", manifest_len, manifest_off);
let mut manifest_buf = vec![0; manifest_len as usize];
try!(file.seek(SeekFrom::Start(manifest_off)));
try!(file.read_exact(&mut manifest_buf));
let rlp = UntrustedRlp::new(&manifest_buf);
let state: Vec<ChunkInfo> = try!(rlp.val_at(0));
let blocks: Vec<ChunkInfo> = try!(rlp.val_at(1));
let manifest = ManifestData {
state_hashes: state.iter().map(|c| c.0).collect(),
block_hashes: blocks.iter().map(|c| c.0).collect(),
state_root: try!(rlp.val_at(2)),
block_number: try!(rlp.val_at(3)),
block_hash: try!(rlp.val_at(4)),
};
Ok(Some(PackedReader {
file: file,
state_hashes: state.into_iter().map(|c| (c.0, (c.1, c.2))).collect(),
block_hashes: blocks.into_iter().map(|c| (c.0, (c.1, c.2))).collect(),
manifest: manifest
}))
}
}
impl SnapshotReader for PackedReader {
fn manifest(&self) -> &ManifestData {
&self.manifest
}
fn chunk(&self, hash: H256) -> io::Result<Bytes> {
let &(len, off) = self.state_hashes.get(&hash).or_else(|| self.block_hashes.get(&hash))
.expect("only chunks in the manifest can be requested; qed");
let mut file = &self.file;
try!(file.seek(SeekFrom::Start(off)));
let mut buf = vec![0; len as usize];
try!(file.read_exact(&mut buf[..]));
Ok(buf)
}
}
/// reader for "loose" snapshots
pub struct LooseReader {
dir: PathBuf,
manifest: ManifestData,
}
impl LooseReader {
/// Create a new `LooseReader` which will read the manifest and chunk data from
/// the given directory.
pub fn new(mut dir: PathBuf) -> Result<Self, ::error::Error> {
let mut manifest_buf = Vec::new();
dir.push("MANIFEST");
let mut manifest_file = try!(File::open(&dir));
try!(manifest_file.read_to_end(&mut manifest_buf));
let manifest = try!(ManifestData::from_rlp(&manifest_buf[..]));
dir.pop();
Ok(LooseReader {
dir: dir,
manifest: manifest,
})
}
}
impl SnapshotReader for LooseReader {
fn manifest(&self) -> &ManifestData {
&self.manifest
}
fn chunk(&self, hash: H256) -> io::Result<Bytes> {
let mut path = self.dir.clone();
path.push(hash.hex());
let mut buf = Vec::new();
let mut file = try!(File::open(&path));
try!(file.read_to_end(&mut buf));
Ok(buf)
}
}
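The writer and reader halves are symmetric: every chunk hash recorded by `finish` can later be served by `chunk`. A rough round-trip sketch in the style of the tests later in this commit; the `round_trip` function and its arguments are illustrative, not committed code:

// Hypothetical round trip: write one state chunk, finish, then read it back.
// `hash` is the sha3 of `data`, and `manifest.state_hashes` contains `hash`.
fn round_trip(path: &::std::path::Path, hash: H256, data: &[u8], manifest: ManifestData) {
    let mut writer = PackedWriter::new(path).unwrap();
    writer.write_state_chunk(hash, data).unwrap();
    writer.finish(manifest).unwrap();
    let reader = PackedReader::new(path).unwrap().unwrap();
    assert_eq!(&reader.chunk(hash).unwrap()[..], data);
}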

ethcore/src/snapshot/mod.rs
@@ -14,123 +14,120 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! Snapshot creation helpers.
+//! Snapshot creation, restoration, and network service.
use std::collections::VecDeque;
-use std::fs::{create_dir_all, File};
-use std::io::Write;
-use std::path::{Path, PathBuf};
+use std::sync::Arc;
use account_db::{AccountDB, AccountDBMut};
-use client::BlockChainClient;
-use error::Error;
-use ids::BlockID;
-use views::{BlockView, HeaderView};
+use blockchain::{BlockChain, BlockProvider};
+use engines::Engine;
+use views::BlockView;
-use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut, DBTransaction};
-use util::error::UtilError;
+use util::{Bytes, Hashable, HashDB, snappy, TrieDB, TrieDBMut, TrieMut};
+use util::Mutex;
use util::hash::{FixedHash, H256};
+use util::journaldb::{self, Algorithm, JournalDB};
+use util::kvdb::Database;
use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType};
+use util::rlp::SHA3_NULL_RLP;
use self::account::Account;
use self::block::AbridgedBlock;
+use self::io::SnapshotWriter;
use crossbeam::{scope, ScopedJoinHandle};
+use rand::{Rng, OsRng};
+pub use self::error::Error;
+pub use self::service::{RestorationStatus, Service, SnapshotService};
+pub mod io;
+pub mod service;
mod account;
mod block;
+mod error;
+#[cfg(test)]
+mod tests;
-// Try to have chunks be around 16MB (before compression)
-const PREFERRED_CHUNK_SIZE: usize = 16 * 1024 * 1024;
+// Try to have chunks be around 4MB (before compression)
+const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
+// How many blocks to include in a snapshot, starting from the head of the chain.
+const SNAPSHOT_BLOCKS: u64 = 30000;
-/// Take a snapshot using the given client and database, writing into `path`.
-pub fn take_snapshot(client: &BlockChainClient, mut path: PathBuf, state_db: &HashDB) -> Result<(), Error> {
-let chain_info = client.chain_info();
-let genesis_hash = chain_info.genesis_hash;
-let best_header_raw = client.best_block_header();
-let best_header = HeaderView::new(&best_header_raw);
-let state_root = best_header.state_root();
-trace!(target: "snapshot", "Taking snapshot starting at block {}", best_header.number());
-let _ = create_dir_all(&path);
-let state_hashes = try!(chunk_state(state_db, &state_root, &path));
-let block_hashes = try!(chunk_blocks(client, best_header.hash(), genesis_hash, &path));
-trace!(target: "snapshot", "produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len());
+/// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
+pub fn take_snapshot<W: SnapshotWriter + Send>(chain: &BlockChain, start_block_hash: H256, state_db: &HashDB, writer: W) -> Result<(), Error> {
+let start_header = try!(chain.block_header(&start_block_hash).ok_or(Error::InvalidStartingBlock(start_block_hash)));
+let state_root = start_header.state_root();
+let number = start_header.number();
+info!("Taking snapshot starting at block {}", number);
+let writer = Mutex::new(writer);
+let (state_hashes, block_hashes) = try!(scope(|scope| {
+let block_guard = scope.spawn(|| chunk_blocks(chain, (number, start_block_hash), &writer));
+let state_res = chunk_state(state_db, state_root, &writer);
+state_res.and_then(|state_hashes| {
+block_guard.join().map(|block_hashes| (state_hashes, block_hashes))
+})
+}));
+info!("produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len());
let manifest_data = ManifestData {
state_hashes: state_hashes,
block_hashes: block_hashes,
-state_root: state_root,
-block_number: chain_info.best_block_number,
-block_hash: chain_info.best_block_hash,
+state_root: *state_root,
+block_number: number,
+block_hash: start_block_hash,
};
-path.push("MANIFEST");
-let mut manifest_file = try!(File::create(&path));
-try!(manifest_file.write_all(&manifest_data.into_rlp()));
+try!(writer.into_inner().finish(manifest_data));
Ok(())
}
-// shared portion of write_chunk
-// returns either a (hash, compressed_size) pair or an io error.
-fn write_chunk(raw_data: &[u8], compression_buffer: &mut Vec<u8>, path: &Path) -> Result<(H256, usize), Error> {
-let compressed_size = snappy::compress_into(raw_data, compression_buffer);
-let compressed = &compression_buffer[..compressed_size];
-let hash = compressed.sha3();
-let mut file_path = path.to_owned();
-file_path.push(hash.hex());
-let mut file = try!(File::create(file_path));
-try!(file.write_all(compressed));
-Ok((hash, compressed_size))
-}
/// Used to build block chunks.
struct BlockChunker<'a> {
-client: &'a BlockChainClient,
+chain: &'a BlockChain,
// block, receipt rlp pairs.
rlps: VecDeque<Bytes>,
current_hash: H256,
hashes: Vec<H256>,
snappy_buffer: Vec<u8>,
+writer: &'a Mutex<SnapshotWriter + 'a>,
}
impl<'a> BlockChunker<'a> {
// Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash.
-// Loops until we reach the genesis, and writes out the remainder.
-fn chunk_all(&mut self, genesis_hash: H256, path: &Path) -> Result<(), Error> {
+// Loops until we reach the first desired block, and writes out the remainder.
+fn chunk_all(&mut self, first_hash: H256) -> Result<(), Error> {
let mut loaded_size = 0;
-while self.current_hash != genesis_hash {
-let block = self.client.block(BlockID::Hash(self.current_hash))
-.expect("started from the head of chain and walking backwards; client stores full chain; qed");
+while self.current_hash != first_hash {
+let (block, receipts) = try!(self.chain.block(&self.current_hash)
+.and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r)))
+.ok_or(Error::BlockNotFound(self.current_hash)));
let view = BlockView::new(&block);
let abridged_rlp = AbridgedBlock::from_block_view(&view).into_inner();
-let receipts = self.client.block_receipts(&self.current_hash)
-.expect("started from head of chain and walking backwards; client stores full chain; qed");
let pair = {
let mut pair_stream = RlpStream::new_list(2);
-pair_stream.append(&abridged_rlp).append(&receipts);
+pair_stream.append_raw(&abridged_rlp, 1).append(&receipts);
pair_stream.out()
};
let new_loaded_size = loaded_size + pair.len();
-// cut off the chunk if too large
+// cut off the chunk if too large.
if new_loaded_size > PREFERRED_CHUNK_SIZE {
-let header = view.header_view();
-try!(self.write_chunk(header.parent_hash(), header.number(), path));
+try!(self.write_chunk());
loaded_size = pair.len();
} else {
loaded_size = new_loaded_size;
@@ -141,25 +138,44 @@ impl<'a> BlockChunker<'a> {
}
if loaded_size != 0 {
-// we don't store the genesis block, so once we get to this point,
-// the "first" block will be number 1.
-try!(self.write_chunk(genesis_hash, 1, path));
+// we don't store the first block, so once we get to this point,
+// the "first" block will be first_number + 1.
+try!(self.write_chunk());
}
Ok(())
}
// write out the data in the buffers to a chunk on disk
-fn write_chunk(&mut self, parent_hash: H256, number: u64, path: &Path) -> Result<(), Error> {
+//
+// we preface each chunk with the parent of the first block's details.
+fn write_chunk(&mut self) -> Result<(), Error> {
+// since the block we're inspecting now doesn't go into the
+// chunk if it's too large, the current hash is the parent hash
+// for the first block in that chunk.
+let parent_hash = self.current_hash;
trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len());
-let mut rlp_stream = RlpStream::new_list(self.rlps.len() + 2);
-rlp_stream.append(&parent_hash).append(&number);
+let (parent_number, parent_details) = try!(self.chain.block_number(&parent_hash)
+.and_then(|n| self.chain.block_details(&parent_hash).map(|d| (n, d)))
+.ok_or(Error::BlockNotFound(parent_hash)));
+let parent_total_difficulty = parent_details.total_difficulty;
+let mut rlp_stream = RlpStream::new_list(3 + self.rlps.len());
+rlp_stream.append(&parent_number).append(&parent_hash).append(&parent_total_difficulty);
for pair in self.rlps.drain(..) {
rlp_stream.append_raw(&pair, 1);
}
let raw_data = rlp_stream.out();
-let (hash, size) = try!(write_chunk(&raw_data, &mut self.snappy_buffer, path));
+let size = snappy::compress_into(&raw_data, &mut self.snappy_buffer);
+let compressed = &self.snappy_buffer[..size];
+let hash = compressed.sha3();
+try!(self.writer.lock().write_block_chunk(hash, compressed));
trace!(target: "snapshot", "wrote block chunk. hash: {}, size: {}, uncompressed size: {}", hash.hex(), size, raw_data.len());
self.hashes.push(hash);
@@ -172,16 +188,29 @@ impl<'a> BlockChunker<'a> {
///
/// The path parameter is the directory to store the block chunks in.
/// This function assumes the directory exists already.
+/// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis.
-pub fn chunk_blocks(client: &BlockChainClient, best_block_hash: H256, genesis_hash: H256, path: &Path) -> Result<Vec<H256>, Error> {
+pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), writer: &Mutex<SnapshotWriter + 'a>) -> Result<Vec<H256>, Error> {
+let (start_number, start_hash) = start_block_info;
+let first_hash = if start_number < SNAPSHOT_BLOCKS {
+// use the genesis hash.
+chain.genesis_hash()
+} else {
+let first_num = start_number - SNAPSHOT_BLOCKS;
+chain.block_hash(first_num)
+.expect("number before best block number; whole chain is stored; qed")
+};
let mut chunker = BlockChunker {
-client: client,
+chain: chain,
rlps: VecDeque::new(),
-current_hash: best_block_hash,
+current_hash: start_hash,
hashes: Vec::new(),
snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
+writer: writer,
};
-try!(chunker.chunk_all(genesis_hash, path));
+try!(chunker.chunk_all(first_hash));
Ok(chunker.hashes)
}
@@ -191,8 +220,8 @@ struct StateChunker<'a> {
hashes: Vec<H256>,
rlps: Vec<Bytes>,
cur_size: usize,
-snapshot_path: &'a Path,
snappy_buffer: Vec<u8>,
+writer: &'a Mutex<SnapshotWriter + 'a>,
}
impl<'a> StateChunker<'a> {
@@ -226,7 +255,12 @@ impl<'a> StateChunker<'a> {
}
let raw_data = stream.out();
-let (hash, compressed_size) = try!(write_chunk(&raw_data, &mut self.snappy_buffer, self.snapshot_path));
+let compressed_size = snappy::compress_into(&raw_data, &mut self.snappy_buffer);
+let compressed = &self.snappy_buffer[..compressed_size];
+let hash = compressed.sha3();
+try!(self.writer.lock().write_state_chunk(hash, compressed));
trace!(target: "snapshot", "wrote state chunk. size: {}, uncompressed size: {}", compressed_size, raw_data.len());
self.hashes.push(hash);
@@ -241,21 +275,21 @@ impl<'a> StateChunker<'a> {
///
/// Returns a list of hashes of chunks created, or any error it may
/// have encountered.
-pub fn chunk_state(db: &HashDB, root: &H256, path: &Path) -> Result<Vec<H256>, Error> {
-let account_view = try!(TrieDB::new(db, &root));
+pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter + 'a>) -> Result<Vec<H256>, Error> {
+let account_trie = try!(TrieDB::new(db, &root));
let mut chunker = StateChunker {
hashes: Vec::new(),
rlps: Vec::new(),
cur_size: 0,
-snapshot_path: path,
snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
+writer: writer,
};
trace!(target: "snapshot", "beginning state chunking");
// account_key here is the address' hash.
-for (account_key, account_data) in account_view.iter() {
+for (account_key, account_data) in account_trie.iter() {
let account = Account::from_thin_rlp(account_data);
let account_key_hash = H256::from_slice(&account_key);
@@ -274,6 +308,7 @@ pub fn chunk_state(db: &HashDB, root: &H256, path: &Path) -> Result<Vec<H256>, E
}
/// Manifest data.
+#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ManifestData {
/// List of state chunk hashes.
pub state_hashes: Vec<H256>,
@@ -324,63 +359,60 @@ impl ManifestData {
pub struct StateRebuilder {
db: Box<JournalDB>,
state_root: H256,
-snappy_buffer: Vec<u8>
}
impl StateRebuilder {
/// Create a new state rebuilder to write into the given backing DB.
-pub fn new(db: Box<JournalDB>) -> Self {
+pub fn new(db: Arc<Database>, pruning: Algorithm) -> Self {
StateRebuilder {
-db: db,
-state_root: H256::zero(),
-snappy_buffer: Vec::new(),
+db: journaldb::new(db.clone(), pruning, ::client::DB_COL_STATE),
+state_root: SHA3_NULL_RLP,
}
}
-/// Feed a compressed state chunk into the rebuilder.
-pub fn feed(&mut self, compressed: &[u8]) -> Result<(), Error> {
-let len = try!(snappy::decompress_into(compressed, &mut self.snappy_buffer));
-let rlp = UntrustedRlp::new(&self.snappy_buffer[..len]);
+/// Feed an uncompressed state chunk into the rebuilder.
+pub fn feed(&mut self, chunk: &[u8]) -> Result<(), ::error::Error> {
+let rlp = UntrustedRlp::new(chunk);
let account_fat_rlps: Vec<_> = rlp.iter().map(|r| r.as_raw()).collect();
let mut pairs = Vec::with_capacity(rlp.item_count());
+let backing = self.db.backing().clone();
// initialize the pairs vector with empty values so we have slots to write into.
-for _ in 0..rlp.item_count() {
-pairs.push((H256::new(), Vec::new()));
-}
+pairs.resize(rlp.item_count(), (H256::new(), Vec::new()));
-let chunk_size = account_fat_rlps.len() / ::num_cpus::get();
+let chunk_size = account_fat_rlps.len() / ::num_cpus::get() + 1;
// build account tries in parallel.
try!(scope(|scope| {
let mut handles = Vec::new();
for (account_chunk, out_pairs_chunk) in account_fat_rlps.chunks(chunk_size).zip(pairs.chunks_mut(chunk_size)) {
let mut db = self.db.boxed_clone();
-let handle: ScopedJoinHandle<Result<(), Error>> = scope.spawn(move || {
+let handle: ScopedJoinHandle<Result<Box<JournalDB>, ::error::Error>> = scope.spawn(move || {
try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk));
-// commit the db changes we made in this thread.
-let batch = DBTransaction::new(&db.backing());
-try!(db.commit(&batch, 0, &H256::zero(), None));
-try!(db.backing().write(batch).map_err(UtilError::SimpleString));
-Ok(())
+trace!(target: "snapshot", "thread rebuilt {} account tries", account_chunk.len());
+Ok(db)
});
handles.push(handle);
}
-// see if we got any errors.
+// commit all account tries to the db, but only in this thread.
+let batch = backing.transaction();
for handle in handles {
-try!(handle.join());
+let mut thread_db = try!(handle.join());
+try!(thread_db.inject(&batch));
}
+try!(backing.write(batch).map_err(::util::UtilError::SimpleString));
-Ok::<_, Error>(())
+Ok::<_, ::error::Error>(())
}));
// batch trie writes
{
-let mut account_trie = if self.state_root != H256::zero() {
+let mut account_trie = if self.state_root != SHA3_NULL_RLP {
try!(TrieDBMut::from_existing(self.db.as_hashdb_mut(), &mut self.state_root))
} else {
TrieDBMut::new(self.db.as_hashdb_mut(), &mut self.state_root)
@@ -391,9 +423,10 @@ impl StateRebuilder {
}
}
-let batch = DBTransaction::new(self.db.backing());
-try!(self.db.commit(&batch, 0, &H256::zero(), None));
-try!(self.db.backing().write(batch).map_err(|e| Error::Util(e.into())));
+let batch = backing.transaction();
+try!(self.db.inject(&batch));
+try!(backing.write(batch).map_err(::util::UtilError::SimpleString));
+trace!(target: "snapshot", "current state root: {:?}", self.state_root);
Ok(())
}
@@ -401,7 +434,7 @@ impl StateRebuilder {
pub fn state_root(&self) -> H256 { self.state_root }
}
-fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mut [(H256, Bytes)]) -> Result<(), Error> {
+fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mut [(H256, Bytes)]) -> Result<(), ::error::Error> {
for (account_pair, out) in account_chunk.into_iter().zip(out_chunk) {
let account_rlp = UntrustedRlp::new(account_pair);
@@ -410,7 +443,7 @@ fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mu
let fat_rlp = UntrustedRlp::new(&decompressed[..]);
let thin_rlp = {
-let mut acct_db = AccountDBMut::from_hash(db.as_hashdb_mut(), hash);
+let mut acct_db = AccountDBMut::from_hash(db, hash);
// fill out the storage trie and code while decoding.
let acc = try!(Account::from_fat_rlp(&mut acct_db, fat_rlp));
@@ -422,3 +455,98 @@ fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mu
}
Ok(())
}
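The parallel rebuild in `StateRebuilder::feed` follows a map/merge shape: each worker thread builds account tries into its own journaldb overlay (via `boxed_clone`), and only the calling thread merges those overlays into a single write batch. A condensed sketch of that shape, with simplified, illustrative names (`work_chunks`, `build_tries_into`, `journal_db`):

try!(scope(|scope| {
    // map: every thread fills a private overlay, so no locking is needed.
    let handles: Vec<_> = work_chunks.into_iter().map(|chunk| {
        let mut overlay = journal_db.boxed_clone();
        scope.spawn(move || build_tries_into(overlay.as_hashdb_mut(), chunk).map(|_| overlay))
    }).collect();
    // merge: a single thread injects every overlay into one batch.
    let batch = backing.transaction();
    for handle in handles {
        let mut overlay = try!(handle.join());
        try!(overlay.inject(&batch));
    }
    try!(backing.write(batch).map_err(::util::UtilError::SimpleString));
    Ok::<_, ::error::Error>(())
}));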
/// Proportion of blocks which we will verify PoW for.
const POW_VERIFY_RATE: f32 = 0.02;
/// Rebuilds the blockchain from chunks.
///
/// Does basic verification for all blocks, but PoW verification for some.
/// Blocks must be fed in-order.
///
/// The first block in every chunk is disconnected from the last block in the
/// chunk before it, as chunks may be submitted out-of-order.
///
/// After all chunks have been submitted, we "glue" the chunks together.
pub struct BlockRebuilder {
chain: BlockChain,
rng: OsRng,
disconnected: Vec<(u64, H256)>,
best_number: u64,
}
impl BlockRebuilder {
/// Create a new BlockRebuilder.
pub fn new(chain: BlockChain, best_number: u64) -> Result<Self, ::error::Error> {
Ok(BlockRebuilder {
chain: chain,
rng: try!(OsRng::new()),
disconnected: Vec::new(),
best_number: best_number,
})
}
/// Feed the rebuilder an uncompressed block chunk.
/// Returns the number of blocks fed or any errors.
pub fn feed(&mut self, chunk: &[u8], engine: &Engine) -> Result<u64, ::error::Error> {
use basic_types::Seal::With;
use util::U256;
let rlp = UntrustedRlp::new(chunk);
let item_count = rlp.item_count();
trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 2);
// todo: assert here that these values are consistent with chunks being in order.
let mut cur_number = try!(rlp.val_at::<u64>(0)) + 1;
let mut parent_hash = try!(rlp.val_at::<H256>(1));
let parent_total_difficulty = try!(rlp.val_at::<U256>(2));
for idx in 3..item_count {
let pair = try!(rlp.at(idx));
let abridged_rlp = try!(pair.at(0)).as_raw().to_owned();
let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
let receipts: Vec<::receipt::Receipt> = try!(pair.val_at(1));
let block = try!(abridged_block.to_block(parent_hash, cur_number));
let block_bytes = block.rlp_bytes(With);
if self.rng.gen::<f32>() <= POW_VERIFY_RATE {
try!(engine.verify_block_seal(&block.header))
} else {
try!(engine.verify_block_basic(&block.header, Some(&block_bytes)));
}
let is_best = cur_number == self.best_number;
// special-case the first block in each chunk.
if idx == 3 {
if self.chain.insert_snapshot_block(&block_bytes, receipts, Some(parent_total_difficulty), is_best) {
self.disconnected.push((cur_number, block.header.hash()));
}
} else {
self.chain.insert_snapshot_block(&block_bytes, receipts, None, is_best);
}
self.chain.commit();
parent_hash = BlockView::new(&block_bytes).hash();
cur_number += 1;
}
Ok(item_count as u64 - 3)
}
/// Glue together any disconnected chunks. To be called at the end.
pub fn glue_chunks(&mut self) {
for &(ref first_num, ref first_hash) in &self.disconnected {
let parent_num = first_num - 1;
// check if the parent is even in the chain.
// since we don't restore every single block in the chain,
// the first block of the first chunks has nothing to connect to.
if let Some(parent_hash) = self.chain.block_hash(parent_num) {
// if so, add the child to it.
self.chain.add_child(parent_hash, *first_hash);
}
}
}
}
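For reference, the uncompressed block chunk produced by `BlockChunker::write_chunk` and consumed by `BlockRebuilder::feed` is one RLP list; a schematic, not code from this commit:

// [
//     parent_number,              // u64: number of the first block's parent
//     parent_hash,                // H256: hash of the first block's parent
//     parent_total_difficulty,    // U256: total difficulty at the parent
//     [abridged_block, receipts], // oldest block in the chunk
//     ...
//     [abridged_block, receipts], // youngest block in the chunk
// ]

This is why `feed` reads items 0 through 2 as the parent details and starts iterating blocks at index 3.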

ethcore/src/snapshot/service.rs
@@ -0,0 +1,437 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot network service implementation.
use std::collections::HashSet;
use std::io::ErrorKind;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use super::{ManifestData, StateRebuilder, BlockRebuilder};
use super::io::{SnapshotReader, LooseReader};
use blockchain::BlockChain;
use engines::Engine;
use error::Error;
use service::ClientIoMessage;
use spec::Spec;
use io::IoChannel;
use util::{Bytes, H256, Mutex, UtilError};
use util::journaldb::Algorithm;
use util::kvdb::{Database, DatabaseConfig};
use util::snappy;
/// Statuses for restorations.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum RestorationStatus {
/// No restoration.
Inactive,
/// Ongoing restoration.
Ongoing,
/// Failed restoration.
Failed,
}
/// The interface for a snapshot network service.
/// This handles:
/// - restoration of snapshots to temporary databases.
/// - responding to queries for snapshot manifests and chunks
pub trait SnapshotService {
/// Query the most recent manifest data.
fn manifest(&self) -> Option<ManifestData>;
/// Get raw chunk for a given hash.
fn chunk(&self, hash: H256) -> Option<Bytes>;
/// Ask the snapshot service for the restoration status.
fn status(&self) -> RestorationStatus;
/// Ask the snapshot service for the number of chunks completed.
/// Return a tuple of (state_chunks, block_chunks).
/// Undefined when not restoring.
fn chunks_done(&self) -> (usize, usize);
/// Begin snapshot restoration.
/// If a restoration is in progress, this will reset it.
/// From this point on, any previous snapshot may become unavailable.
/// Returns true if successful, false otherwise.
fn begin_restore(&self, manifest: ManifestData) -> bool;
/// Feed a raw state chunk to the service to be processed asynchronously.
/// no-op if not currently restoring.
fn restore_state_chunk(&self, hash: H256, chunk: Bytes);
/// Feed a raw block chunk to the service to be processed asynchronously.
/// no-op if not currently restoring.
fn restore_block_chunk(&self, hash: H256, chunk: Bytes);
}
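A rough sketch of how a caller might drive this trait through a full restoration; the `drive_restore` function and the `fetch` closure standing in for the network layer are assumptions, not part of this commit:

fn drive_restore<S: SnapshotService + ?Sized>(service: &S, manifest: ManifestData, fetch: &Fn(H256) -> Bytes) -> bool {
    if !service.begin_restore(manifest.clone()) {
        return false;
    }
    // chunks are processed asynchronously; feed everything the manifest names.
    for hash in &manifest.state_hashes {
        service.restore_state_chunk(*hash, fetch(*hash));
    }
    for hash in &manifest.block_hashes {
        service.restore_block_chunk(*hash, fetch(*hash));
    }
    // poll until the service leaves the Ongoing state.
    loop {
        match service.status() {
            RestorationStatus::Ongoing => ::std::thread::sleep(::std::time::Duration::from_millis(250)),
            RestorationStatus::Inactive => return true, // finalized successfully
            RestorationStatus::Failed => return false,
        }
    }
}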
/// State restoration manager.
struct Restoration {
state_chunks_left: HashSet<H256>,
block_chunks_left: HashSet<H256>,
state: StateRebuilder,
blocks: BlockRebuilder,
snappy_buffer: Bytes,
final_state_root: H256,
}
impl Restoration {
// make a new restoration, building databases in the given path.
fn new(manifest: &ManifestData, pruning: Algorithm, path: &Path, gb: &[u8]) -> Result<Self, Error> {
let cfg = DatabaseConfig::with_columns(::client::DB_NO_OF_COLUMNS);
let raw_db = Arc::new(try!(Database::open(&cfg, &*path.to_string_lossy())
.map_err(|s| UtilError::SimpleString(s))));
let chain = BlockChain::new(Default::default(), gb, raw_db.clone());
let blocks = try!(BlockRebuilder::new(chain, manifest.block_number));
Ok(Restoration {
state_chunks_left: manifest.state_hashes.iter().cloned().collect(),
block_chunks_left: manifest.block_hashes.iter().cloned().collect(),
state: StateRebuilder::new(raw_db, pruning),
blocks: blocks,
snappy_buffer: Vec::new(),
final_state_root: manifest.state_root,
})
}
// feeds a state chunk
fn feed_state(&mut self, hash: H256, chunk: &[u8]) -> Result<(), Error> {
use util::trie::TrieError;
if self.state_chunks_left.remove(&hash) {
let len = try!(snappy::decompress_into(&chunk, &mut self.snappy_buffer));
try!(self.state.feed(&self.snappy_buffer[..len]));
if self.state_chunks_left.is_empty() {
let root = self.state.state_root();
if root != self.final_state_root {
warn!("Final restored state has wrong state root: expected {:?}, got {:?}", root, self.final_state_root);
return Err(TrieError::InvalidStateRoot(root).into());
}
}
}
Ok(())
}
// feeds a block chunk
fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine) -> Result<(), Error> {
if self.block_chunks_left.remove(&hash) {
let len = try!(snappy::decompress_into(&chunk, &mut self.snappy_buffer));
try!(self.blocks.feed(&self.snappy_buffer[..len], engine));
if self.block_chunks_left.is_empty() {
// connect out-of-order chunks.
self.blocks.glue_chunks();
}
}
Ok(())
}
// is everything done?
fn is_done(&self) -> bool {
self.block_chunks_left.is_empty() && self.state_chunks_left.is_empty()
}
}
/// Type alias for client io channel.
pub type Channel = IoChannel<ClientIoMessage>;
/// Service implementation.
///
/// This will replace the client's state DB as soon as the last state chunk
/// is fed, and will replace the client's blocks DB when the last block chunk
/// is fed.
pub struct Service {
restoration: Mutex<Option<Restoration>>,
client_db: PathBuf, // "<chain hash>/<pruning>/db"
db_path: PathBuf, // "<chain hash>/"
io_channel: Channel,
pruning: Algorithm,
status: Mutex<RestorationStatus>,
reader: Option<LooseReader>,
engine: Arc<Engine>,
genesis_block: Bytes,
state_chunks: AtomicUsize,
block_chunks: AtomicUsize,
}
impl Service {
/// Create a new snapshot service.
pub fn new(spec: &Spec, pruning: Algorithm, client_db: PathBuf, io_channel: Channel) -> Result<Self, Error> {
let db_path = try!(client_db.parent().and_then(Path::parent)
.ok_or_else(|| UtilError::SimpleString("Failed to find database root.".into()))).to_owned();
let reader = {
let mut snapshot_path = db_path.clone();
snapshot_path.push("snapshot");
LooseReader::new(snapshot_path).ok()
};
let service = Service {
restoration: Mutex::new(None),
client_db: client_db,
db_path: db_path,
io_channel: io_channel,
pruning: pruning,
status: Mutex::new(RestorationStatus::Inactive),
reader: reader,
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
state_chunks: AtomicUsize::new(0),
block_chunks: AtomicUsize::new(0),
};
// create the snapshot dir if it doesn't exist.
match fs::create_dir_all(service.snapshot_dir()) {
Err(e) => {
if e.kind() != ErrorKind::AlreadyExists {
return Err(e.into())
}
}
_ => {}
}
// delete the temporary restoration dir if it does exist.
match fs::remove_dir_all(service.restoration_dir()) {
Err(e) => {
if e.kind() != ErrorKind::NotFound {
return Err(e.into())
}
}
_ => {}
}
Ok(service)
}
// get the snapshot path.
fn snapshot_dir(&self) -> PathBuf {
let mut dir = self.db_path.clone();
dir.push("snapshot");
dir
}
// get the restoration directory.
fn restoration_dir(&self) -> PathBuf {
let mut dir = self.snapshot_dir();
dir.push("restoration");
dir
}
// restoration db path.
fn restoration_db(&self) -> PathBuf {
let mut dir = self.restoration_dir();
dir.push("db");
dir
}
// replace one of the client's databases with our own.
fn replace_client_db(&self) -> Result<(), Error> {
let our_db = self.restoration_db();
trace!(target: "snapshot", "replacing {:?} with {:?}", self.client_db, our_db);
let mut backup_db = self.restoration_dir();
backup_db.push("backup_db");
let _ = fs::remove_dir_all(&backup_db);
let existed = match fs::rename(&self.client_db, &backup_db) {
Ok(_) => true,
Err(e) => if let ErrorKind::NotFound = e.kind() {
false
} else {
return Err(e.into());
}
};
match fs::rename(&our_db, &self.client_db) {
Ok(_) => {
// clean up the backup.
if existed {
try!(fs::remove_dir_all(&backup_db));
}
Ok(())
}
Err(e) => {
// restore the backup.
if existed {
try!(fs::rename(&backup_db, &self.client_db));
}
Err(e.into())
}
}
}
// finalize the restoration. this accepts an already-locked
// restoration as an argument -- so acquiring it again _will_
// lead to deadlock.
fn finalize_restoration(&self, rest: &mut Option<Restoration>) -> Result<(), Error> {
trace!(target: "snapshot", "finalizing restoration");
self.state_chunks.store(0, Ordering::SeqCst);
self.block_chunks.store(0, Ordering::SeqCst);
// destroy the restoration before replacing databases.
*rest = None;
try!(self.replace_client_db());
*self.status.lock() = RestorationStatus::Inactive;
// TODO: take control of restored snapshot.
let _ = fs::remove_dir_all(self.restoration_dir());
Ok(())
}
/// Feed a chunk of either kind. no-op if no restoration or status is wrong.
fn feed_chunk(&self, hash: H256, chunk: &[u8], is_state: bool) -> Result<(), Error> {
match self.status() {
RestorationStatus::Inactive | RestorationStatus::Failed => Ok(()),
RestorationStatus::Ongoing => {
// TODO: be able to process block chunks and state chunks at same time?
let mut restoration = self.restoration.lock();
let res = {
let rest = match *restoration {
Some(ref mut r) => r,
None => return Ok(()),
};
match is_state {
true => rest.feed_state(hash, chunk),
false => rest.feed_blocks(hash, chunk, &*self.engine),
}.map(|_| rest.is_done())
};
match res {
Ok(is_done) => {
match is_state {
true => self.state_chunks.fetch_add(1, Ordering::SeqCst),
false => self.block_chunks.fetch_add(1, Ordering::SeqCst),
};
match is_done {
true => self.finalize_restoration(&mut *restoration),
false => Ok(())
}
}
other => other.map(drop),
}
}
}
}
/// Feed a state chunk to be processed synchronously.
pub fn feed_state_chunk(&self, hash: H256, chunk: &[u8]) {
match self.feed_chunk(hash, chunk, true) {
Ok(()) => (),
Err(e) => {
warn!("Encountered error during state restoration: {}", e);
*self.restoration.lock() = None;
*self.status.lock() = RestorationStatus::Failed;
let _ = fs::remove_dir_all(self.restoration_dir());
}
}
}
/// Feed a block chunk to be processed synchronously.
pub fn feed_block_chunk(&self, hash: H256, chunk: &[u8]) {
match self.feed_chunk(hash, chunk, false) {
Ok(()) => (),
Err(e) => {
warn!("Encountered error during block restoration: {}", e);
*self.restoration.lock() = None;
*self.status.lock() = RestorationStatus::Failed;
let _ = fs::remove_dir_all(self.restoration_dir());
}
}
}
}
impl SnapshotService for Service {
fn manifest(&self) -> Option<ManifestData> {
self.reader.as_ref().map(|r| r.manifest().clone())
}
fn chunk(&self, hash: H256) -> Option<Bytes> {
self.reader.as_ref().and_then(|r| r.chunk(hash).ok())
}
fn status(&self) -> RestorationStatus {
*self.status.lock()
}
fn chunks_done(&self) -> (usize, usize) {
(self.state_chunks.load(Ordering::Relaxed), self.block_chunks.load(Ordering::Relaxed))
}
fn begin_restore(&self, manifest: ManifestData) -> bool {
let rest_dir = self.restoration_dir();
let mut res = self.restoration.lock();
// tear down existing restoration.
*res = None;
// delete and recreate the restoration dir.
if let Err(e) = fs::remove_dir_all(&rest_dir).and_then(|_| fs::create_dir_all(&rest_dir)) {
match e.kind() {
ErrorKind::NotFound => {},
_ => {
warn!("encountered error {} while beginning snapshot restoration.", e);
return false;
}
}
}
// make new restoration.
let db_path = self.restoration_db();
*res = match Restoration::new(&manifest, self.pruning, &db_path, &self.genesis_block) {
Ok(b) => Some(b),
Err(e) => {
warn!("encountered error {} while beginning snapshot restoration.", e);
return false;
}
};
*self.status.lock() = RestorationStatus::Ongoing;
true
}
fn restore_state_chunk(&self, hash: H256, chunk: Bytes) {
self.io_channel.send(ClientIoMessage::FeedStateChunk(hash, chunk))
.expect("snapshot service and io service are kept alive by client service; qed");
}
fn restore_block_chunk(&self, hash: H256, chunk: Bytes) {
self.io_channel.send(ClientIoMessage::FeedBlockChunk(hash, chunk))
.expect("snapshot service and io service are kept alive by client service; qed");
}
}
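For orientation, the on-disk layout the service manages, reconstructed from the path helpers above (a schematic only):

// <db_path>/snapshot/                        snapshot_dir(): current snapshot chunks + MANIFEST
// <db_path>/snapshot/restoration/            restoration_dir(): scratch space, wiped on startup
// <db_path>/snapshot/restoration/db/         restoration_db(): database rebuilt from fed chunks
// <db_path>/snapshot/restoration/backup_db/  temporary home of the old client DB during replace_client_db()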

ethcore/src/snapshot/tests/blocks.rs
@@ -0,0 +1,91 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block chunker and rebuilder tests.
use devtools::RandomTempPath;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use blockchain::BlockChain;
use snapshot::{chunk_blocks, BlockRebuilder};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use util::{Mutex, snappy};
use util::kvdb::{Database, DatabaseConfig};
use std::sync::Arc;
fn chunk_and_restore(amount: u64) {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let db_cfg = DatabaseConfig::with_columns(::client::DB_NO_OF_COLUMNS);
let orig_path = RandomTempPath::create_dir();
let new_path = RandomTempPath::create_dir();
let mut snapshot_path = new_path.as_path().to_owned();
snapshot_path.push("SNAP");
let old_db = Arc::new(Database::open(&db_cfg, orig_path.as_str()).unwrap());
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
// build the blockchain.
for _ in 0..amount {
let block = canon_chain.generate(&mut finalizer).unwrap();
let batch = old_db.transaction();
bc.insert_block(&batch, &block, vec![]);
bc.commit();
old_db.write(batch).unwrap();
}
let best_hash = bc.best_block_hash();
// snapshot it.
let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
let block_hashes = chunk_blocks(&bc, (amount, best_hash), &writer).unwrap();
writer.into_inner().finish(::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: block_hashes,
state_root: Default::default(),
block_number: amount,
block_hash: best_hash,
}).unwrap();
// restore it.
let new_db = Arc::new(Database::open(&db_cfg, new_path.as_str()).unwrap());
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
let mut rebuilder = BlockRebuilder::new(new_chain, amount).unwrap();
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
let engine = ::engines::NullEngine::new(Default::default(), Default::default());
for chunk_hash in &reader.manifest().block_hashes {
let compressed = reader.chunk(*chunk_hash).unwrap();
let chunk = snappy::decompress(&compressed).unwrap();
rebuilder.feed(&chunk, &engine).unwrap();
}
rebuilder.glue_chunks();
drop(rebuilder);
// and test it.
let new_chain = BlockChain::new(Default::default(), &genesis, new_db);
assert_eq!(new_chain.best_block_hash(), best_hash);
}
#[test]
fn chunk_and_restore_500() { chunk_and_restore(500) }
#[test]
fn chunk_and_restore_40k() { chunk_and_restore(40000) }

ethcore/src/snapshot/tests/helpers.rs
@@ -0,0 +1,122 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot test helpers. These are used to build blockchains and state tries
//! which can be queried before and after a full snapshot/restore cycle.
use account_db::AccountDBMut;
use rand::Rng;
use snapshot::account::Account;
use util::hash::{FixedHash, H256};
use util::hashdb::HashDB;
use util::trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode};
use util::trie::{TrieDB, TrieDBMut};
use util::rlp::SHA3_NULL_RLP;
// the proportion of accounts we will alter each tick.
const ACCOUNT_CHURN: f32 = 0.01;
/// This structure will incrementally alter a state given an rng.
pub struct StateProducer {
state_root: H256,
storage_seed: H256,
}
impl StateProducer {
/// Create a new `StateProducer`.
pub fn new() -> Self {
StateProducer {
state_root: SHA3_NULL_RLP,
storage_seed: H256::zero(),
}
}
/// Tick the state producer. This alters the state, writing new data into
/// the database.
pub fn tick<R: Rng>(&mut self, rng: &mut R, db: &mut HashDB) {
// modify existing accounts.
let mut accounts_to_modify: Vec<_> = {
let trie = TrieDB::new(&*db, &self.state_root).unwrap();
trie.iter()
.filter(|_| rng.gen::<f32>() < ACCOUNT_CHURN)
.map(|(k, v)| (H256::from_slice(&k), v.to_owned()))
.collect()
};
// sweep once to alter storage tries.
for &mut (ref mut address_hash, ref mut account_data) in &mut accounts_to_modify {
let mut account = Account::from_thin_rlp(&*account_data);
let acct_db = AccountDBMut::from_hash(db, *address_hash);
fill_storage(acct_db, account.storage_root_mut(), &mut self.storage_seed);
*account_data = account.to_thin_rlp();
}
// sweep again to alter account trie.
let mut trie = TrieDBMut::from_existing(db, &mut self.state_root).unwrap();
for (address_hash, account_data) in accounts_to_modify {
trie.insert(&address_hash[..], &account_data).unwrap();
}
// add between 0 and 4 new accounts each tick.
let new_accs = rng.gen::<u32>() % 5;
for _ in 0..new_accs {
let address_hash = H256::random();
let balance: usize = rng.gen();
let nonce: usize = rng.gen();
let acc = ::account::Account::new_basic(balance.into(), nonce.into()).rlp();
trie.insert(&address_hash[..], &acc).unwrap();
}
}
/// Get the current state root.
pub fn state_root(&self) -> H256 {
self.state_root
}
}
/// Fill the storage of an account.
pub fn fill_storage(mut db: AccountDBMut, root: &mut H256, seed: &mut H256) {
let map = StandardMap {
alphabet: Alphabet::All,
min_key: 6,
journal_key: 6,
value_mode: ValueMode::Random,
count: 100,
};
{
let mut trie = if *root == SHA3_NULL_RLP {
SecTrieDBMut::new(&mut db, root)
} else {
SecTrieDBMut::from_existing(&mut db, root).unwrap()
};
for (k, v) in map.make_with(seed) {
trie.insert(&k, &v).unwrap();
}
}
}
/// Compare two state dbs.
pub fn compare_dbs(one: &HashDB, two: &HashDB) {
let keys = one.keys();
for (key, _) in keys {
assert_eq!(one.get(&key).unwrap(), two.get(&key).unwrap());
}
}

ethcore/src/snapshot/tests/mod.rs
@@ -0,0 +1,22 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot tests.
mod blocks;
mod state;
pub mod helpers;

ethcore/src/snapshot/tests/state.rs
@@ -0,0 +1,82 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! State snapshotting tests.
use snapshot::{chunk_state, StateRebuilder};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use super::helpers::{compare_dbs, StateProducer};
use rand;
use util::hash::H256;
use util::journaldb::{self, Algorithm};
use util::kvdb::{Database, DatabaseConfig};
use util::memorydb::MemoryDB;
use util::Mutex;
use devtools::RandomTempPath;
use std::sync::Arc;
#[test]
fn snap_and_restore() {
let mut producer = StateProducer::new();
let mut rng = rand::thread_rng();
let mut old_db = MemoryDB::new();
let db_cfg = DatabaseConfig::with_columns(::client::DB_NO_OF_COLUMNS);
for _ in 0..150 {
producer.tick(&mut rng, &mut old_db);
}
let snap_dir = RandomTempPath::create_dir();
let mut snap_file = snap_dir.as_path().to_owned();
snap_file.push("SNAP");
let state_root = producer.state_root();
let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap());
let state_hashes = chunk_state(&old_db, &state_root, &writer).unwrap();
writer.into_inner().finish(::snapshot::ManifestData {
state_hashes: state_hashes,
block_hashes: Vec::new(),
state_root: state_root,
block_number: 0,
block_hash: H256::default(),
}).unwrap();
let mut db_path = snap_dir.as_path().to_owned();
db_path.push("db");
let db = {
let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap());
let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::Archive);
let reader = PackedReader::new(&snap_file).unwrap().unwrap();
for chunk_hash in &reader.manifest().state_hashes {
let raw = reader.chunk(*chunk_hash).unwrap();
let chunk = ::util::snappy::decompress(&raw).unwrap();
rebuilder.feed(&chunk).unwrap();
}
assert_eq!(rebuilder.state_root(), state_root);
new_db
};
let new_db = journaldb::new(db, Algorithm::Archive, ::client::DB_COL_STATE);
compare_dbs(&old_db, new_db.as_hashdb());
}

ethcore/src/spec/spec.rs
@@ -29,6 +29,7 @@ use std::cell::RefCell;
/// Parameters common to all engines.
#[derive(Debug, PartialEq, Clone)]
+#[cfg_attr(test, derive(Default))]
pub struct CommonParams {
/// Account start nonce.
pub account_start_nonce: U256,
@@ -60,7 +61,7 @@ pub struct Spec {
/// User friendly spec name
pub name: String,
/// What engine are we using for this?
-pub engine: Box<Engine>,
+pub engine: Arc<Engine>,
/// The fork identifier for this chain. Only needed to distinguish two chains sharing the same genesis.
pub fork_name: Option<String>,
@@ -130,14 +131,14 @@ impl From<ethjson::spec::Spec> for Spec {
}
impl Spec {
-/// Convert engine spec into a boxed Engine of the right underlying type.
+/// Convert engine spec into an arc'd Engine of the right underlying type.
/// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
-fn engine(engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Box<Engine> {
+fn engine(engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Arc<Engine> {
match engine_spec {
-ethjson::spec::Engine::Null => Box::new(NullEngine::new(params, builtins)),
-ethjson::spec::Engine::InstantSeal => Box::new(InstantSeal::new(params, builtins)),
-ethjson::spec::Engine::Ethash(ethash) => Box::new(ethereum::Ethash::new(params, From::from(ethash.params), builtins)),
-ethjson::spec::Engine::BasicAuthority(basic_authority) => Box::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
+ethjson::spec::Engine::Null => Arc::new(NullEngine::new(params, builtins)),
+ethjson::spec::Engine::InstantSeal => Arc::new(InstantSeal::new(params, builtins)),
+ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(params, From::from(ethash.params), builtins)),
+ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
}
}

ethcore/src/tests/client.rs
@@ -25,7 +25,8 @@ use miner::Miner;
#[test]
fn imports_from_empty() {
let dir = RandomTempPath::new();
-let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), Arc::new(Miner::with_spec(get_test_spec())), IoChannel::disconnected()).unwrap();
+let spec = get_test_spec();
+let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap();
client.import_verified_blocks();
client.flush_queue();
}
@@ -43,7 +44,8 @@ fn returns_state_root_basic() {
#[test]
fn imports_good_block() {
let dir = RandomTempPath::new();
-let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), Arc::new(Miner::with_spec(get_test_spec())), IoChannel::disconnected()).unwrap();
+let spec = get_test_spec();
+let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap();
let good_block = get_good_dummy_block();
if let Err(_) = client.import_block(good_block) {
panic!("error importing block being good by definition");
@@ -58,7 +60,8 @@ fn imports_good_block() {
#[test]
fn query_none_block() {
let dir = RandomTempPath::new();
-let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), Arc::new(Miner::with_spec(get_test_spec())), IoChannel::disconnected()).unwrap();
+let spec = get_test_spec();
+let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), Arc::new(Miner::with_spec(&spec)), IoChannel::disconnected()).unwrap();
let non_existant = client.block_header(BlockID::Number(188));
assert!(non_existant.is_none());

ethcore/src/tests/helpers.rs
@@ -35,7 +35,7 @@ pub enum ChainEra {
}
pub struct TestEngine {
-engine: Box<Engine>,
+engine: Arc<Engine>,
max_depth: usize
}
@@ -133,7 +133,7 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
let dir = RandomTempPath::new();
let test_spec = get_test_spec();
-let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), Arc::new(Miner::with_spec(get_test_spec())), IoChannel::disconnected()).unwrap();
+let client = Client::new(ClientConfig::default(), &test_spec, dir.as_path(), Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected()).unwrap();
let test_engine = &test_spec.engine;
let mut db_result = get_temp_journal_db();
@@ -232,7 +232,8 @@ pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting
pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<Client>> {
let dir = RandomTempPath::new();
-let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), Arc::new(Miner::with_spec(get_test_spec())), IoChannel::disconnected()).unwrap();
+let test_spec = get_test_spec();
+let client = Client::new(ClientConfig::default(), &test_spec, dir.as_path(), Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected()).unwrap();
for block in &blocks {
if let Err(_) = client.import_block(block.clone()) {
panic!("panic importing block which is well-formed");

View File

@@ -30,11 +30,12 @@ pub fn run_test_worker(scope: &crossbeam::Scope, stop: Arc<AtomicBool>, socket_p
	let socket_path = socket_path.to_owned();
	scope.spawn(move || {
		let temp = RandomTempPath::create_dir();
+		let spec = get_test_spec();
		let client = Client::new(
			ClientConfig::default(),
-			get_test_spec(),
+			&spec,
			temp.as_path(),
-			Arc::new(Miner::with_spec(get_test_spec())),
+			Arc::new(Miner::with_spec(&spec)),
			IoChannel::disconnected()).unwrap();
		let mut worker = nanoipc::Worker::new(&(client as Arc<BlockChainClient>));
		worker.add_reqrep(&socket_path).unwrap();

View File

@@ -23,7 +23,7 @@ use ipc::binary::BinaryConvertError;
use header::BlockNumber;
/// Information about the blockchain gathered together.
-#[derive(Debug, Binary)]
+#[derive(Clone, Debug, Binary)]
pub struct BlockChainInfo {
	/// Blockchain difficulty.
	pub total_difficulty: U256,
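Deriving `Clone` lets a caller keep an owned copy of the chain summary instead of holding a borrow on its source. A trimmed, hypothetical mirror of the idea (field set abbreviated, `U256` stubbed with `u64`):

// With Clone derived, a point-in-time copy of the chain info can be
// stored independently of the struct it was read from.
#[derive(Clone, Debug)]
struct BlockChainInfo {
	total_difficulty: u64, // stands in for U256 here
}

fn main() {
	let live = BlockChainInfo { total_difficulty: 42 };
	let copy = live.clone(); // owned, independent of `live`
	println!("{:?} {:?}", live, copy);
}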

View File

@@ -136,9 +136,9 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
	// build client
	let service = try!(ClientService::start(
		client_config,
-		spec,
+		&spec,
		Path::new(&client_path),
-		Arc::new(Miner::with_spec(try!(cmd.spec.spec()))),
+		Arc::new(Miner::with_spec(&spec)),
	).map_err(|e| format!("Client service error: {:?}", e)));
	panic_handler.forward_from(&service);
@@ -246,9 +246,9 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
	let service = try!(ClientService::start(
		client_config,
-		spec,
+		&spec,
		Path::new(&client_path),
-		Arc::new(Miner::with_spec(try!(cmd.spec.spec())))
+		Arc::new(Miner::with_spec(&spec)),
	).map_err(|e| format!("Client service error: {:?}", e)));
	panic_handler.forward_from(&service);

View File

@@ -32,6 +32,8 @@ Usage:
	parity import [ <file> ] [options]
	parity export [ <file> ] [options]
	parity signer new-token [options]
+	parity snapshot <file> [options]
+	parity restore <file> [options]
Operating Options:
	--mode MODE	Set the operating mode. MODE can be one of:
@@ -286,6 +288,8 @@ pub struct Args {
	pub cmd_import: bool,
	pub cmd_signer: bool,
	pub cmd_new_token: bool,
+	pub cmd_snapshot: bool,
+	pub cmd_restore: bool,
	pub cmd_ui: bool,
	pub arg_pid_file: String,
	pub arg_file: Option<String>,

View File

@@ -41,6 +41,7 @@ use run::RunCmd;
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat};
use presale::ImportWallet;
use account::{AccountCmd, NewAccount, ImportAccounts};
+use snapshot::{self, SnapshotCommand};
#[derive(Debug, PartialEq)]
pub enum Cmd {
@@ -50,6 +51,7 @@ pub enum Cmd {
	ImportPresaleWallet(ImportWallet),
	Blockchain(BlockchainCmd),
	SignerToken(String),
+	Snapshot(SnapshotCommand),
}
#[derive(Debug, PartialEq)]
@@ -156,6 +158,36 @@ impl Configuration {
				to_block: try!(to_block_id(&self.args.flag_to)),
			};
			Cmd::Blockchain(BlockchainCmd::Export(export_cmd))
+		} else if self.args.cmd_snapshot {
+			let snapshot_cmd = SnapshotCommand {
+				cache_config: cache_config,
+				dirs: dirs,
+				spec: spec,
+				pruning: pruning,
+				logger_config: logger_config,
+				mode: mode,
+				tracing: tracing,
+				compaction: compaction,
+				file_path: self.args.arg_file.clone(),
+				wal: wal,
+				kind: snapshot::Kind::Take,
+			};
+			Cmd::Snapshot(snapshot_cmd)
+		} else if self.args.cmd_restore {
+			let restore_cmd = SnapshotCommand {
+				cache_config: cache_config,
+				dirs: dirs,
+				spec: spec,
+				pruning: pruning,
+				logger_config: logger_config,
+				mode: mode,
+				tracing: tracing,
+				compaction: compaction,
+				file_path: self.args.arg_file.clone(),
+				wal: wal,
+				kind: snapshot::Kind::Restore,
+			};
+			Cmd::Snapshot(restore_cmd)
		} else {
			let daemon = if self.args.cmd_daemon {
				Some(self.args.arg_pid_file.clone())
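Both new arms populate an identical `SnapshotCommand` and differ only in `kind`, so `parity snapshot <file>` and `parity restore <file>` accept the same set of configuration flags and share all of the client-setup code shown in parity/snapshot.rs below.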

View File

@@ -82,6 +82,7 @@ mod blockchain;
mod presale;
mod run;
mod sync;
+mod snapshot;
use std::{process, env};
use cli::print_version;
@@ -99,6 +100,7 @@ fn execute(command: Cmd) -> Result<String, String> {
		Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd),
		Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd),
		Cmd::SignerToken(path) => signer::new_token(path),
+		Cmd::Snapshot(snapshot_cmd) => snapshot::execute(snapshot_cmd),
	}
}
@@ -131,4 +133,3 @@ fn main() {
		}
	}
}

View File

@@ -179,7 +179,7 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
			}
		},
		Api::Personal => {
-			server.add_delegate(PersonalClient::new(&deps.secret_store, &deps.client, &deps.miner, deps.signer_port).to_delegate());
+			server.add_delegate(PersonalClient::new(&deps.secret_store, &deps.client, &deps.miner, deps.signer_port, deps.geth_compatibility).to_delegate());
		},
		Api::Signer => {
			server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_queue).to_delegate());

View File

@@ -139,7 +139,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
	let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf)));
	// create miner
-	let miner = Miner::new(cmd.miner_options, cmd.gas_pricer.into(), spec, Some(account_provider.clone()));
+	let miner = Miner::new(cmd.miner_options, cmd.gas_pricer.into(), &spec, Some(account_provider.clone()));
	miner.set_author(cmd.miner_extras.author);
	miner.set_gas_floor_target(cmd.miner_extras.gas_floor_target);
	miner.set_gas_ceil_target(cmd.miner_extras.gas_ceil_target);
@@ -161,20 +161,16 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
		fork_name.as_ref(),
	);
-	// load spec
-	// TODO: make it clonable and load it only once!
-	let spec = try!(cmd.spec.spec());
	// set up bootnodes
	let mut net_conf = cmd.net_conf;
	if !cmd.custom_bootnodes {
		net_conf.boot_nodes = spec.nodes.clone();
	}
-	// create client
+	// create client service.
	let service = try!(ClientService::start(
		client_config,
-		spec,
+		&spec,
		Path::new(&client_path),
		miner.clone(),
	).map_err(|e| format!("Client service error: {:?}", e)));

195
parity/snapshot.rs Normal file
View File

@@ -0,0 +1,195 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Snapshot and restoration commands.

use std::time::Duration;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use ethcore_logger::{setup_log, Config as LogConfig};
use ethcore::snapshot::{RestorationStatus, SnapshotService};
use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
use ethcore::service::ClientService;
use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType};
use ethcore::miner::Miner;

use cache::CacheConfig;
use params::{SpecType, Pruning};
use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
use fdlimit;

use io::PanicHandler;

/// Kinds of snapshot commands.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Kind {
	/// Take a snapshot.
	Take,
	/// Restore a snapshot.
	Restore
}

/// Command for snapshot creation or restoration.
#[derive(Debug, PartialEq)]
pub struct SnapshotCommand {
	pub cache_config: CacheConfig,
	pub dirs: Directories,
	pub spec: SpecType,
	pub pruning: Pruning,
	pub logger_config: LogConfig,
	pub mode: Mode,
	pub tracing: Switch,
	pub compaction: DatabaseCompactionProfile,
	pub file_path: Option<String>,
	pub wal: bool,
	pub kind: Kind,
}

impl SnapshotCommand {
	// shared portion of snapshot commands: start the client service
	fn start_service(self) -> Result<(ClientService, Arc<PanicHandler>), String> {
		// Setup panic handler
		let panic_handler = PanicHandler::new_in_arc();

		// load spec file
		let spec = try!(self.spec.spec());

		// load genesis hash
		let genesis_hash = spec.genesis_header().hash();

		// Setup logging
		let _logger = setup_log(&self.logger_config);

		fdlimit::raise_fd_limit();

		// select pruning algorithm
		let algorithm = self.pruning.to_algorithm(&self.dirs, genesis_hash, spec.fork_name.as_ref());

		// prepare client_path
		let client_path = self.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);

		// execute upgrades
		try!(execute_upgrades(&self.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, self.compaction.compaction_profile()));

		// prepare client config
		let client_config = to_client_config(&self.cache_config, &self.dirs, genesis_hash, self.mode, self.tracing, self.pruning, self.compaction, self.wal, VMType::default(), "".into(), spec.fork_name.as_ref());

		let service = try!(ClientService::start(
			client_config,
			&spec,
			Path::new(&client_path),
			Arc::new(Miner::with_spec(&spec))
		).map_err(|e| format!("Client service error: {:?}", e)));

		Ok((service, panic_handler))
	}

	/// restore from a snapshot
	pub fn restore(self) -> Result<(), String> {
		let file = try!(self.file_path.clone().ok_or("No file path provided.".to_owned()));
		let (service, _panic_handler) = try!(self.start_service());

		warn!("Snapshot restoration is experimental and the format may be subject to change.");

		let snapshot = service.snapshot_service();
		let reader = PackedReader::new(&Path::new(&file))
			.map_err(|e| format!("Couldn't open snapshot file: {}", e))
			.and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));

		let reader = try!(reader);
		let manifest = reader.manifest();

		// drop the client so we don't restore while it has open DB handles.
		drop(service);

		if !snapshot.begin_restore(manifest.clone()) {
			return Err("Failed to begin restoration.".into());
		}

		let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());

		let informant_handle = snapshot.clone();
		::std::thread::spawn(move || {
			while let RestorationStatus::Ongoing = informant_handle.status() {
				let (state_chunks, block_chunks) = informant_handle.chunks_done();
				info!("Processed {}/{} state chunks and {}/{} block chunks.",
					state_chunks, num_state, block_chunks, num_blocks);

				::std::thread::sleep(Duration::from_secs(5));
			}
		});

		info!("Restoring state");
		for &state_hash in &manifest.state_hashes {
			if snapshot.status() == RestorationStatus::Failed {
				return Err("Restoration failed".into());
			}

			let chunk = try!(reader.chunk(state_hash)
				.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e)));
			snapshot.feed_state_chunk(state_hash, &chunk);
		}

		info!("Restoring blocks");
		for &block_hash in &manifest.block_hashes {
			if snapshot.status() == RestorationStatus::Failed {
				return Err("Restoration failed".into());
			}

			let chunk = try!(reader.chunk(block_hash)
				.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e)));
			snapshot.feed_block_chunk(block_hash, &chunk);
		}

		match snapshot.status() {
			RestorationStatus::Ongoing => Err("Snapshot file is incomplete and missing chunks.".into()),
			RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
			RestorationStatus::Inactive => {
				info!("Restoration complete.");
				Ok(())
			}
		}
	}

	/// Take a snapshot from the head of the chain.
	pub fn take_snapshot(self) -> Result<(), String> {
		let file_path = try!(self.file_path.clone().ok_or("No file path provided.".to_owned()));
		let file_path: PathBuf = file_path.into();
		let (service, _panic_handler) = try!(self.start_service());

		warn!("Snapshots are currently experimental. File formats may be subject to change.");

		let writer = try!(PackedWriter::new(&file_path)
			.map_err(|e| format!("Failed to open snapshot writer: {}", e)));

		if let Err(e) = service.client().take_snapshot(writer) {
			let _ = ::std::fs::remove_file(&file_path);
			return Err(format!("Encountered fatal error while creating snapshot: {}", e));
		}

		Ok(())
	}
}

/// Execute this snapshot command.
pub fn execute(cmd: SnapshotCommand) -> Result<String, String> {
	match cmd.kind {
		Kind::Take => try!(cmd.take_snapshot()),
		Kind::Restore => try!(cmd.restore()),
	}

	Ok(String::new())
}
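The reader is opened in two steps because `PackedReader::new` apparently returns a `Result<Option<..>, ..>`: the outer `Result` signals an I/O failure, the inner `Option` a failed format check. A standalone sketch of that flattening, with stand-in types (`open_reader` is illustrative, not the crate's API):

use std::io;

// Stand-in for PackedReader::new's shape: Err = I/O problem,
// Ok(None) = file opened but not recognized as a snapshot.
fn open_reader(valid: bool) -> Result<Option<&'static str>, io::Error> {
	if valid { Ok(Some("reader")) } else { Ok(None) }
}

fn main() -> Result<(), String> {
	// ok_or converts the inner Option<T> into Result<T, E>, so a single
	// `?` (or try! in 2016-era Rust) covers both failure modes.
	let reader = open_reader(true)
		.map_err(|e| format!("Couldn't open snapshot file: {}", e))
		.and_then(|x| x.ok_or("Snapshot file has invalid format.".into()))?;
	println!("{}", reader);
	Ok(())
}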

View File

@@ -33,16 +33,18 @@ pub struct PersonalClient<C, M> where C: MiningBlockChainClient, M: MinerService
	client: Weak<C>,
	miner: Weak<M>,
	signer_port: Option<u16>,
+	allow_perm_unlock: bool,
}
impl<C, M> PersonalClient<C, M> where C: MiningBlockChainClient, M: MinerService {
	/// Creates new PersonalClient
-	pub fn new(store: &Arc<AccountProvider>, client: &Arc<C>, miner: &Arc<M>, signer_port: Option<u16>) -> Self {
+	pub fn new(store: &Arc<AccountProvider>, client: &Arc<C>, miner: &Arc<M>, signer_port: Option<u16>, allow_perm_unlock: bool) -> Self {
		PersonalClient {
			accounts: Arc::downgrade(store),
			client: Arc::downgrade(client),
			miner: Arc::downgrade(miner),
			signer_port: signer_port,
+			allow_perm_unlock: allow_perm_unlock,
		}
	}
@@ -89,11 +91,17 @@ impl<C: 'static, M: 'static> Personal for PersonalClient<C, M> where C: MiningBl
	fn unlock_account(&self, params: Params) -> Result<Value, Error> {
		try!(self.active());
-		from_params::<(RpcH160, String, u64)>(params).and_then(
-			|(account, account_pass, _)|{
+		from_params::<(RpcH160, String, Option<u64>)>(params).and_then(
+			|(account, account_pass, duration)|{
				let account: Address = account.into();
				let store = take_weak!(self.accounts);
-				match store.unlock_account_temporarily(account, account_pass) {
+				let r = match (self.allow_perm_unlock, duration) {
+					(false, _) => store.unlock_account_temporarily(account, account_pass),
+					(true, Some(0)) => store.unlock_account_permanently(account, account_pass),
+					(true, Some(d)) => store.unlock_account_timed(account, account_pass, d as u32 * 1000),
+					(true, None) => store.unlock_account_timed(account, account_pass, 300_000),
+				};
+				match r {
					Ok(_) => Ok(Value::Bool(true)),
					Err(_) => Ok(Value::Bool(false)),
				}
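`unlock_account` now takes an optional duration in seconds: with `allow_perm_unlock` off (it is wired from `deps.geth_compatibility` above) the unlock stays one-shot; with it on, `0` means permanent, any other value becomes a timed unlock of that many seconds, and the default is 300 seconds. A hypothetical sketch of the expiry bookkeeping a timed unlock implies — the enum mirrors the account-provider's unlock states but is illustrative, not the actual implementation:

use std::time::{Duration, Instant};

// One-shot, permanent, or valid from an instant for a lifetime in
// milliseconds (the RPC's seconds are scaled by 1000 before storage).
enum Unlock {
	Temp,
	Perm,
	Timed(Instant, u32),
}

fn is_expired(unlock: &Unlock) -> bool {
	match *unlock {
		Unlock::Temp | Unlock::Perm => false,
		Unlock::Timed(start, ms) => start.elapsed() > Duration::from_millis(ms as u64),
	}
}

fn main() {
	let five_minutes = Unlock::Timed(Instant::now(), 300_000);
	assert!(!is_expired(&five_minutes));
}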

View File

@@ -50,7 +50,7 @@ fn sync_provider() -> Arc<TestSyncProvider> {
	}))
}
-fn miner_service(spec: Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
+fn miner_service(spec: &Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
	Miner::new(
		MinerOptions {
			new_work_notify: vec![],
@@ -65,8 +65,8 @@ fn miner_service(spec: &Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
			enable_resubmission: true,
		},
		GasPricer::new_fixed(20_000_000_000u64.into()),
-		spec,
-		Some(accounts)
+		&spec,
+		Some(accounts),
	)
}
@@ -89,7 +89,7 @@ struct EthTester {
impl EthTester {
	fn from_chain(chain: &BlockChain) -> Self {
-		let tester = Self::from_spec_provider(|| make_spec(chain));
+		let tester = Self::from_spec(make_spec(chain));
		for b in &chain.blocks_rlp() {
			if Block::is_good(&b) {
@@ -105,13 +105,11 @@ impl EthTester {
		tester
	}
-	fn from_spec_provider<F>(spec_provider: F) -> Self
-		where F: Fn() -> Spec {
+	fn from_spec(spec: Spec) -> Self {
		let dir = RandomTempPath::new();
		let account_provider = account_provider();
-		let miner_service = miner_service(spec_provider(), account_provider.clone());
-		let client = Client::new(ClientConfig::default(), spec_provider(), dir.as_path(), miner_service.clone(), IoChannel::disconnected()).unwrap();
+		let miner_service = miner_service(&spec, account_provider.clone());
+		let client = Client::new(ClientConfig::default(), &spec, dir.as_path(), miner_service.clone(), IoChannel::disconnected()).unwrap();
		let sync_provider = sync_provider();
		let external_miner = Arc::new(ExternalMiner::default());
@@ -291,7 +289,7 @@ fn eth_transaction_count() {
	use util::crypto::Secret;
	let secret = Secret::from_str("8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2").unwrap();
-	let tester = EthTester::from_spec_provider(|| Spec::load(TRANSACTION_COUNT_SPEC));
+	let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC));
	let address = tester.accounts.insert_account(secret, "").unwrap();
	tester.accounts.unlock_account_permanently(address, "".into()).unwrap();
@@ -417,7 +415,7 @@ fn verify_transaction_counts(name: String, chain: BlockChain) {
#[test]
fn starting_nonce_test() {
-	let tester = EthTester::from_spec_provider(|| Spec::load(POSITIVE_NONCE_SPEC));
+	let tester = EthTester::from_spec(Spec::load(POSITIVE_NONCE_SPEC));
	let address = ::util::hash::Address::from(10);
	let sample = tester.handler.handle_request(&(r#"
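`from_spec_provider` took a `Fn() -> Spec` factory because `Client::new` and `Miner::new` each consumed a `Spec` by value; now that they borrow, a single owned `Spec` suffices and the helper collapses to `from_spec`. The shape of that change, with a stub `Spec` (illustrative only):

struct Spec;

// Old shape: every consumer took Spec by value, so callers had to
// manufacture a fresh one per consumer via a factory closure.
fn old_style<F: Fn() -> Spec>(provider: F) -> (Spec, Spec) {
	(provider(), provider())
}

// New shape: consumers borrow, so one Spec serves them all.
fn new_style(spec: &Spec) -> (&Spec, &Spec) {
	(spec, spec)
}

fn main() {
	let (_a, _b) = old_style(|| Spec);
	let spec = Spec;
	let (_c, _d) = new_style(&spec);
}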

View File

@ -50,7 +50,7 @@ fn setup(signer: Option<u16>) -> PersonalTester {
let accounts = accounts_provider(); let accounts = accounts_provider();
let client = blockchain_client(); let client = blockchain_client();
let miner = miner_service(); let miner = miner_service();
let personal = PersonalClient::new(&accounts, &client, &miner, signer); let personal = PersonalClient::new(&accounts, &client, &miner, signer, false);
let io = IoHandler::new(); let io = IoHandler::new();
io.add_delegate(personal.to_delegate()); io.add_delegate(personal.to_delegate());

View File

@@ -586,6 +586,10 @@ impl ChainSync {
	/// Called by peer once it has new block bodies
	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
+		if !self.peers.get(&peer_id).map_or(false, |p| p.confirmed) {
+			trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id);
+			return Ok(());
+		}
		let block_rlp = try!(r.at(0));
		let header_rlp = try!(block_rlp.at(0));
		let h = header_rlp.as_raw().sha3();
@@ -650,6 +654,10 @@
	/// Handles `NewHashes` packet. Initiates headers download for any unknown hashes.
	fn on_peer_new_hashes(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
+		if !self.peers.get(&peer_id).map_or(false, |p| p.confirmed) {
+			trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id);
+			return Ok(());
+		}
		if self.state != SyncState::Idle {
			trace!(target: "sync", "Ignoring new hashes since we're already downloading.");
			let max = r.iter().take(MAX_NEW_HASHES).map(|item| item.val_at::<BlockNumber>(1).unwrap_or(0)).fold(0u64, max);
@@ -1029,7 +1037,7 @@
	if !io.is_chain_queue_empty() {
		return Ok(());
	}
-	if self.peers.get(&peer_id).map_or(false, |p| p.confirmed) {
+	if !self.peers.get(&peer_id).map_or(false, |p| p.confirmed) {
		trace!(target: "sync", "{} Ignoring transactions from unconfirmed/unknown peer", peer_id);
	}
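The transaction handler's guard was inverted: it logged the "ignoring" message for confirmed peers instead of unconfirmed ones, which this hunk corrects to match the two new guards above. The idiom, sketched standalone, treats unknown and unconfirmed peers alike:

use std::collections::HashMap;

struct PeerInfo {
	confirmed: bool,
}

// map_or(false, ..) makes a missing peer entry behave like an
// unconfirmed peer, so one check filters both cases.
fn should_ignore(peers: &HashMap<u32, PeerInfo>, peer_id: u32) -> bool {
	!peers.get(&peer_id).map_or(false, |p| p.confirmed)
}

fn main() {
	let mut peers = HashMap::new();
	peers.insert(1, PeerInfo { confirmed: true });
	peers.insert(2, PeerInfo { confirmed: false });
	assert!(!should_ignore(&peers, 1)); // confirmed: accept
	assert!(should_ignore(&peers, 2));  // unconfirmed: ignore
	assert!(should_ignore(&peers, 3));  // unknown: ignore
}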
@@ -1685,7 +1693,7 @@ mod tests {
		asking_hash: None,
		ask_time: 0f64,
		expired: false,
-		confirmed: false,
+		confirmed: true,
	});
	sync
}

View File

@@ -43,15 +43,16 @@
//!
//! fn main() {
//!	let dir = env::temp_dir();
+//!	let spec = ethereum::new_frontier();
//!	let miner = Miner::new(
//!		Default::default(),
//!		GasPricer::new_fixed(20_000_000_000u64.into()),
-//!		ethereum::new_frontier(),
+//!		&spec,
//!		None
//!	);
//!	let client = Client::new(
//!		ClientConfig::default(),
-//!		ethereum::new_frontier(),
+//!		&spec,
//!		&dir,
//!		miner,
//!		IoChannel::disconnected()

View File

@@ -19,7 +19,7 @@
use common::*;
use rlp::*;
use hashdb::*;
-use overlaydb::*;
+use overlaydb::OverlayDB;
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction};

View File

@@ -134,7 +134,7 @@ impl MemoryDB {
	if key == &SHA3_NULL_RLP {
		return Some(STATIC_NULL_RLP.clone());
	}
-	self.data.get(key).map(|&(ref v, x)| (&v[..], x))
+	self.data.get(key).map(|&(ref val, rc)| (&val[..], rc))
}
/// Denote than an existing value has the given key. Used when a key gets removed without