Use standard paths for Ethash cache (#5881)

* Use cache path to store ethash files.

* Fix tests; make the API more flexible.

* Use AsRef<Path> everywhere.

* Fix ethcore tests.

* Fix RPC tests.
Tomasz Drwięga 2017-07-10 12:57:40 +02:00 committed by Arkadiy Paronyan
parent 125aa0aeb4
commit a24b6ad983
20 changed files with 210 additions and 142 deletions
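
In short, the ethash crate now takes an explicit cache directory instead of writing under a hard-coded ~/.ethash. A minimal usage sketch of the public entry point, based on the signatures in the diffs below; the temp-dir choice mirrors the updated tests and is illustrative only:

extern crate ethash;

use std::env;

fn main() {
    // Any value implementing AsRef<Path> can serve as the cache directory.
    let cache_dir = env::temp_dir();
    let manager = ethash::EthashManager::new(&cache_dir);
    // Light-cache files for the requested epoch are created under `cache_dir`
    // rather than under ~/.ethash.
    let _proof = manager.compute_light(1, &[0u8; 32], 1);
}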


@ -48,7 +48,7 @@ pub trait Fetcher: Send + Sync + 'static {
}
pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHintContract> {
dapps_path: PathBuf,
cache_path: PathBuf,
resolver: R,
cache: Arc<Mutex<ContentCache>>,
sync: Arc<SyncStatus>,
@ -61,7 +61,7 @@ pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHint
impl<R: URLHint + 'static, F: Fetch> Drop for ContentFetcher<F, R> {
fn drop(&mut self) {
// Clear cache path
let _ = fs::remove_dir_all(&self.dapps_path);
let _ = fs::remove_dir_all(&self.cache_path);
}
}
@ -73,11 +73,11 @@ impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> {
remote: Remote,
fetch: F,
) -> Self {
let mut dapps_path = env::temp_dir();
dapps_path.push(random_filename());
let mut cache_path = env::temp_dir();
cache_path.push(random_filename());
ContentFetcher {
dapps_path: dapps_path,
cache_path: cache_path,
resolver: resolver,
sync: sync_status,
cache: Arc::new(Mutex::new(ContentCache::default())),
@ -200,7 +200,7 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
control,
installers::Dapp::new(
content_id.clone(),
self.dapps_path.clone(),
self.cache_path.clone(),
Box::new(on_done),
self.embeddable_on.clone(),
),
@ -219,7 +219,7 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
installers::Content::new(
content_id.clone(),
content.mime,
self.dapps_path.clone(),
self.cache_path.clone(),
Box::new(on_done),
),
self.embeddable_on.clone(),


@ -25,7 +25,7 @@ use std::mem;
use std::ptr;
use sha3;
use std::slice;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::io::{self, Read, Write};
use std::fs::{self, File};
@ -86,6 +86,7 @@ impl Node {
pub type H256 = [u8; 32];
pub struct Light {
cache_dir: PathBuf,
block_number: u64,
cache: Vec<Node>,
seed_compute: Mutex<SeedHashCompute>,
@ -94,8 +95,8 @@ pub struct Light {
/// Light cache structure
impl Light {
/// Create a new light cache for a given block number
pub fn new(block_number: u64) -> Light {
light_new(block_number)
pub fn new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light {
light_new(cache_dir, block_number)
}
/// Calculate the light boundary data
@ -105,17 +106,15 @@ impl Light {
light_compute(self, header_hash, nonce)
}
pub fn file_path(seed_hash: H256) -> PathBuf {
let mut home = ::std::env::home_dir().unwrap();
home.push(".ethash");
home.push("light");
home.push(to_hex(&seed_hash));
home
pub fn file_path<T: AsRef<Path>>(cache_dir: T, seed_hash: H256) -> PathBuf {
let mut cache_dir = cache_dir.as_ref().to_path_buf();
cache_dir.push(to_hex(&seed_hash));
cache_dir
}
pub fn from_file(block_number: u64) -> io::Result<Light> {
pub fn from_file<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> io::Result<Light> {
let seed_compute = SeedHashCompute::new();
let path = Light::file_path(seed_compute.get_seedhash(block_number));
let path = Light::file_path(&cache_dir, seed_compute.get_seedhash(block_number));
let mut file = File::open(path)?;
let cache_size = get_cache_size(block_number);
@ -128,19 +127,22 @@ impl Light {
let buf = unsafe { slice::from_raw_parts_mut(nodes.as_mut_ptr() as *mut u8, cache_size) };
file.read_exact(buf)?;
Ok(Light {
block_number,
cache_dir: cache_dir.as_ref().to_path_buf(),
cache: nodes,
block_number: block_number,
seed_compute: Mutex::new(seed_compute),
})
}
pub fn to_file(&self) -> io::Result<PathBuf> {
let seed_compute = self.seed_compute.lock();
let path = Light::file_path(seed_compute.get_seedhash(self.block_number));
let path = Light::file_path(&self.cache_dir, seed_compute.get_seedhash(self.block_number));
if self.block_number >= ETHASH_EPOCH_LENGTH * 2 {
let deprecated = Light::file_path(
seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2));
&self.cache_dir,
seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2)
);
if deprecated.exists() {
debug!(target: "ethash", "removing: {:?}", &deprecated);
@ -341,14 +343,12 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
}
}
fn light_new(block_number: u64) -> Light {
fn light_new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light {
let seed_compute = SeedHashCompute::new();
let seedhash = seed_compute.get_seedhash(block_number);
let cache_size = get_cache_size(block_number);
if cache_size % NODE_BYTES != 0 {
panic!("Unaligned cache size");
}
assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size");
let num_nodes = cache_size / NODE_BYTES;
let mut nodes = Vec::with_capacity(num_nodes);
@ -372,8 +372,9 @@ fn light_new(block_number: u64) -> Light {
}
Light {
block_number,
cache_dir: cache_dir.as_ref().to_path_buf(),
cache: nodes,
block_number: block_number,
seed_compute: Mutex::new(seed_compute),
}
}
@ -432,7 +433,7 @@ fn test_light_compute() {
let boundary = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84];
let nonce = 0xd7b3ac70a301a249;
// difficulty = 0x085657254bd9u64;
let light = Light::new(486382);
let light = Light::new(&::std::env::temp_dir(), 486382);
let result = light_compute(&light, &hash, nonce);
assert_eq!(result.mix_hash[..], mix_hash[..]);
assert_eq!(result.value[..], boundary[..]);
@ -471,15 +472,16 @@ fn test_seed_compute_after_newer() {
#[test]
fn test_drop_old_data() {
let first = Light::new(0).to_file().unwrap();
let path = ::std::env::temp_dir();
let first = Light::new(&path, 0).to_file().unwrap();
let second = Light::new(ETHASH_EPOCH_LENGTH).to_file().unwrap();
let second = Light::new(&path, ETHASH_EPOCH_LENGTH).to_file().unwrap();
assert!(fs::metadata(&first).is_ok());
let _ = Light::new(ETHASH_EPOCH_LENGTH * 2).to_file();
let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 2).to_file();
assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok());
let _ = Light::new(ETHASH_EPOCH_LENGTH * 3).to_file();
let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 3).to_file();
assert!(fs::metadata(&second).is_err());
}
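
A hypothetical companion test in the same module, sketching the round trip through the new cache-dir-aware to_file/from_file pair (the private `cache` field is reachable here only because the tests live next to the definition):

#[test]
fn test_cache_roundtrip() {
    let path = ::std::env::temp_dir();
    // Build the light cache for the first epoch and persist it under `path`.
    let light = Light::new(&path, 0);
    let file = light.to_file().unwrap();
    assert!(fs::metadata(&file).is_ok());
    // Reload from the same directory; the node count must match.
    let reloaded = Light::from_file(&path, 0).unwrap();
    assert_eq!(reloaded.cache.len(), light.cache.len());
}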


@ -25,6 +25,7 @@ extern crate log;
mod compute;
use std::mem;
use std::path::{Path, PathBuf};
use compute::Light;
pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty, slow_get_seedhash};
@ -41,12 +42,14 @@ struct LightCache {
/// Light/Full cache manager.
pub struct EthashManager {
cache: Mutex<LightCache>,
cache_dir: PathBuf,
}
impl EthashManager {
/// Create a new new instance of ethash manager
pub fn new() -> EthashManager {
pub fn new<T: AsRef<Path>>(cache_dir: T) -> EthashManager {
EthashManager {
cache_dir: cache_dir.as_ref().to_path_buf(),
cache: Mutex::new(LightCache {
recent_epoch: None,
recent: None,
@ -88,11 +91,11 @@ impl EthashManager {
};
match light {
None => {
let light = match Light::from_file(block_number) {
let light = match Light::from_file(&self.cache_dir, block_number) {
Ok(light) => Arc::new(light),
Err(e) => {
debug!("Light cache file not found for {}:{}", block_number, e);
let light = Light::new(block_number);
let light = Light::new(&self.cache_dir, block_number);
if let Err(e) = light.to_file() {
warn!("Light cache file write error: {}", e);
}
@ -112,7 +115,7 @@ impl EthashManager {
#[test]
fn test_lru() {
let ethash = EthashManager::new();
let ethash = EthashManager::new(&::std::env::temp_dir());
let hash = [0u8; 32];
ethash.compute_light(1, &hash, 1);
ethash.compute_light(50000, &hash, 1);


@ -266,7 +266,7 @@ mod tests {
/// Create a new test chain spec with `BasicAuthority` consensus engine.
fn new_test_authority() -> Spec {
let bytes: &[u8] = include_bytes!("../../res/basic_authority.json");
Spec::load(bytes).expect("invalid chain spec")
Spec::load(::std::env::temp_dir(), bytes).expect("invalid chain spec")
}
#[test]


@ -386,7 +386,6 @@ pub trait Engine : Sync + Send {
}
}
/// Common engine utilities
pub mod common {
use block::ExecutedBlock;


@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::path::Path;
use ethash::{quick_get_difficulty, slow_get_seedhash, EthashManager};
use util::*;
use block::*;
@ -24,7 +25,7 @@ use header::{Header, BlockNumber};
use state::CleanupMode;
use spec::CommonParams;
use transaction::UnverifiedTransaction;
use engines::Engine;
use engines::{self, Engine};
use evm::Schedule;
use ethjson;
use rlp::{self, UntrustedRlp};
@ -147,12 +148,17 @@ pub struct Ethash {
impl Ethash {
/// Create a new instance of Ethash engine
pub fn new(params: CommonParams, ethash_params: EthashParams, builtins: BTreeMap<Address, Builtin>) -> Arc<Self> {
pub fn new<T: AsRef<Path>>(
cache_dir: T,
params: CommonParams,
ethash_params: EthashParams,
builtins: BTreeMap<Address, Builtin>,
) -> Arc<Self> {
Arc::new(Ethash {
params: params,
ethash_params: ethash_params,
builtins: builtins,
pow: EthashManager::new(),
params,
ethash_params,
builtins,
pow: EthashManager::new(cache_dir),
})
}
}
@ -165,7 +171,7 @@ impl Ethash {
// for any block in the chain.
// in the future, we might move the Ethash epoch
// caching onto this mechanism as well.
impl ::engines::EpochVerifier for Arc<Ethash> {
impl engines::EpochVerifier for Arc<Ethash> {
fn verify_light(&self, _header: &Header) -> Result<(), Error> { Ok(()) }
fn verify_heavy(&self, header: &Header) -> Result<(), Error> {
self.verify_block_unordered(header, None)
@ -262,7 +268,7 @@ impl Engine for Arc<Ethash> {
_begins_epoch: bool,
) -> Result<(), Error> {
let parent_hash = block.fields().header.parent_hash().clone();
::engines::common::push_last_hash(block, last_hashes, self, &parent_hash)?;
engines::common::push_last_hash(block, last_hashes, self, &parent_hash)?;
if block.fields().header.number() == self.ethash_params.dao_hardfork_transition {
let state = block.fields_mut().state;
for child in &self.ethash_params.dao_hardfork_accounts {
@ -404,8 +410,8 @@ impl Engine for Arc<Ethash> {
Ok(())
}
fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> ::engines::ConstructedVerifier<'a> {
::engines::ConstructedVerifier::Trusted(Box::new(self.clone()))
fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> engines::ConstructedVerifier<'a> {
engines::ConstructedVerifier::Trusted(Box::new(self.clone()))
}
fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
@ -558,13 +564,18 @@ mod tests {
use engines::Engine;
use error::{BlockError, Error};
use header::Header;
use spec::Spec;
use super::super::{new_morden, new_homestead_test};
use super::{Ethash, EthashParams, PARITY_GAS_LIMIT_DETERMINANT, ecip1017_eras_block_reward};
use rlp;
fn test_spec() -> Spec {
new_morden(&::std::env::temp_dir())
}
#[test]
fn on_close_block() {
let spec = new_morden();
let spec = test_spec();
let engine = &*spec.engine;
let genesis_header = spec.genesis_header();
let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
@ -576,7 +587,7 @@ mod tests {
#[test]
fn on_close_block_with_uncle() {
let spec = new_morden();
let spec = test_spec();
let engine = &*spec.engine;
let genesis_header = spec.genesis_header();
let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
@ -594,14 +605,14 @@ mod tests {
#[test]
fn has_valid_metadata() {
let engine = new_morden().engine;
let engine = test_spec().engine;
assert!(!engine.name().is_empty());
assert!(engine.version().major >= 1);
}
#[test]
fn can_return_schedule() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let schedule = engine.schedule(10000000);
assert!(schedule.stack_limit > 0);
@ -611,8 +622,8 @@ mod tests {
#[test]
fn can_do_seal_verification_fail() {
let engine = new_morden().engine;
//let engine = Ethash::new_test(new_morden());
let engine = test_spec().engine;
//let engine = Ethash::new_test(test_spec());
let header: Header = Header::default();
let verify_result = engine.verify_block_basic(&header, None);
@ -626,7 +637,7 @@ mod tests {
#[test]
fn can_do_difficulty_verification_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let mut header: Header = Header::default();
header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
@ -641,7 +652,7 @@ mod tests {
#[test]
fn can_do_proof_of_work_verification_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let mut header: Header = Header::default();
header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap());
@ -657,7 +668,7 @@ mod tests {
#[test]
fn can_do_seal_unordered_verification_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let header: Header = Header::default();
let verify_result = engine.verify_block_unordered(&header, None);
@ -671,7 +682,7 @@ mod tests {
#[test]
fn can_do_seal256_verification_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let mut header: Header = Header::default();
header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
let verify_result = engine.verify_block_unordered(&header, None);
@ -685,7 +696,7 @@ mod tests {
#[test]
fn can_do_proof_of_work_unordered_verification_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let mut header: Header = Header::default();
header.set_seal(vec![rlp::encode(&H256::from("b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d")).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap());
@ -701,7 +712,7 @@ mod tests {
#[test]
fn can_verify_block_family_genesis_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let header: Header = Header::default();
let parent_header: Header = Header::default();
@ -716,7 +727,7 @@ mod tests {
#[test]
fn can_verify_block_family_difficulty_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let mut header: Header = Header::default();
header.set_number(2);
let mut parent_header: Header = Header::default();
@ -733,7 +744,7 @@ mod tests {
#[test]
fn can_verify_block_family_gas_fail() {
let engine = new_morden().engine;
let engine = test_spec().engine;
let mut header: Header = Header::default();
header.set_number(2);
header.set_difficulty(U256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap());
@ -763,7 +774,7 @@ mod tests {
fn difficulty_frontier() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let mut parent_header = Header::default();
parent_header.set_number(1000000);
@ -781,7 +792,7 @@ mod tests {
fn difficulty_homestead() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let mut parent_header = Header::default();
parent_header.set_number(1500000);
@ -838,7 +849,7 @@ mod tests {
ecip1010_pause_transition: 3000000,
..get_default_ethash_params()
};
let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let mut parent_header = Header::default();
parent_header.set_number(3500000);
@ -872,7 +883,7 @@ mod tests {
ecip1010_continue_transition: 5000000,
..get_default_ethash_params()
};
let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let mut parent_header = Header::default();
parent_header.set_number(5000102);
@ -917,7 +928,8 @@ mod tests {
#[test]
fn gas_limit_is_multiple_of_determinant() {
let spec = new_homestead_test();
let ethash = Ethash::new(spec.params().clone(), get_default_ethash_params(), BTreeMap::new());
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let mut parent = Header::new();
let mut header = Header::new();
header.set_number(1);
@ -961,7 +973,7 @@ mod tests {
fn difficulty_max_timestamp() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let mut parent_header = Header::default();
parent_header.set_number(1000000);
@ -989,7 +1001,7 @@ mod tests {
header.set_number(parent_header.number() + 1);
header.set_gas_limit(100_001.into());
header.set_difficulty(ethparams.minimum_difficulty);
let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
assert!(ethash.verify_block_family(&header, &parent_header, None).is_ok());
parent_header.set_number(9);
@ -1044,7 +1056,7 @@ mod tests {
nonce: U256::zero(),
}.sign(keypair.secret(), None).into();
let ethash = Ethash::new(spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
assert!(ethash.verify_transaction_basic(&tx1, &header).is_ok());
assert!(ethash.verify_transaction_basic(&tx2, &header).is_ok());


@ -27,6 +27,7 @@ pub mod denominations;
pub use self::ethash::{Ethash};
pub use self::denominations::*;
use std::path::Path;
use super::spec::*;
/// Most recent fork block that we support on Mainnet.
@ -38,51 +39,56 @@ pub const FORK_SUPPORTED_ROPSTEN: u64 = 10;
/// Most recent fork block that we support on Kovan.
pub const FORK_SUPPORTED_KOVAN: u64 = 0;
fn load(b: &[u8]) -> Spec {
Spec::load(b).expect("chain spec is invalid")
fn load<'a, T: 'a + Into<Option<&'a Path>>>(cache_dir: T, b: &[u8]) -> Spec {
match cache_dir.into() {
Some(path) => Spec::load(path, b),
None => Spec::load(&::std::env::temp_dir(), b)
}.expect("chain spec is invalid")
}
/// Create a new Foundation Olympic chain spec.
pub fn new_olympic() -> Spec { load(include_bytes!("../../res/ethereum/olympic.json")) }
pub fn new_olympic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/olympic.json")) }
/// Create a new Foundation Mainnet chain spec.
pub fn new_foundation() -> Spec { load(include_bytes!("../../res/ethereum/foundation.json")) }
pub fn new_foundation(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/foundation.json")) }
/// Create a new Classic Mainnet chain spec without the DAO hardfork.
pub fn new_classic() -> Spec { load(include_bytes!("../../res/ethereum/classic.json")) }
pub fn new_classic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/classic.json")) }
/// Create a new Expanse mainnet chain spec.
pub fn new_expanse() -> Spec { load(include_bytes!("../../res/ethereum/expanse.json")) }
pub fn new_expanse(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/expanse.json")) }
/// Create a new Kovan testnet chain spec.
pub fn new_kovan() -> Spec { load(include_bytes!("../../res/ethereum/kovan.json")) }
/// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead.
pub fn new_frontier_test() -> Spec { load(include_bytes!("../../res/ethereum/frontier_test.json")) }
/// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier.
pub fn new_homestead_test() -> Spec { load(include_bytes!("../../res/ethereum/homestead_test.json")) }
/// Create a new Foundation Homestead-EIP150-era chain spec as though it never changed from Homestead/Frontier.
pub fn new_eip150_test() -> Spec { load(include_bytes!("../../res/ethereum/eip150_test.json")) }
/// Create a new Foundation Homestead-EIP161-era chain spec as though it never changed from Homestead/Frontier.
pub fn new_eip161_test() -> Spec { load(include_bytes!("../../res/ethereum/eip161_test.json")) }
/// Create a new Foundation Frontier/Homestead/DAO chain spec with transition points at #5 and #8.
pub fn new_transition_test() -> Spec { load(include_bytes!("../../res/ethereum/transition_test.json")) }
/// Create a new Foundation Mainnet chain spec without genesis accounts.
pub fn new_mainnet_like() -> Spec { load(include_bytes!("../../res/ethereum/frontier_like_test.json")) }
/// Create a new Foundation Metropolis era spec.
pub fn new_metropolis_test() -> Spec { load(include_bytes!("../../res/ethereum/metropolis_test.json")) }
pub fn new_kovan(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/kovan.json")) }
/// Create a new Foundation Ropsten chain spec.
pub fn new_ropsten() -> Spec { load(include_bytes!("../../res/ethereum/ropsten.json")) }
pub fn new_ropsten(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/ropsten.json")) }
/// Create a new Morden chain spec.
pub fn new_morden() -> Spec { load(include_bytes!("../../res/ethereum/morden.json")) }
pub fn new_morden(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/morden.json")) }
// For tests
/// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead.
pub fn new_frontier_test() -> Spec { load(None, include_bytes!("../../res/ethereum/frontier_test.json")) }
/// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier.
pub fn new_homestead_test() -> Spec { load(None, include_bytes!("../../res/ethereum/homestead_test.json")) }
/// Create a new Foundation Homestead-EIP150-era chain spec as though it never changed from Homestead/Frontier.
pub fn new_eip150_test() -> Spec { load(None, include_bytes!("../../res/ethereum/eip150_test.json")) }
/// Create a new Foundation Homestead-EIP161-era chain spec as though it never changed from Homestead/Frontier.
pub fn new_eip161_test() -> Spec { load(None, include_bytes!("../../res/ethereum/eip161_test.json")) }
/// Create a new Foundation Frontier/Homestead/DAO chain spec with transition points at #5 and #8.
pub fn new_transition_test() -> Spec { load(None, include_bytes!("../../res/ethereum/transition_test.json")) }
/// Create a new Foundation Mainnet chain spec without genesis accounts.
pub fn new_mainnet_like() -> Spec { load(None, include_bytes!("../../res/ethereum/frontier_like_test.json")) }
/// Create a new Foundation Metropolis era spec.
pub fn new_metropolis_test() -> Spec { load(None, include_bytes!("../../res/ethereum/metropolis_test.json")) }
#[cfg(test)]
mod tests {
@ -94,7 +100,7 @@ mod tests {
#[test]
fn ensure_db_good() {
let spec = new_morden();
let spec = new_morden(&::std::env::temp_dir());
let engine = &spec.engine;
let genesis_header = spec.genesis_header();
let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
@ -109,7 +115,7 @@ mod tests {
#[test]
fn morden() {
let morden = new_morden();
let morden = new_morden(&::std::env::temp_dir());
assert_eq!(morden.state_root(), "f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9".into());
let genesis = morden.genesis_block();
@ -120,7 +126,7 @@ mod tests {
#[test]
fn frontier() {
let frontier = new_foundation();
let frontier = new_foundation(&::std::env::temp_dir());
assert_eq!(frontier.state_root(), "d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544".into());
let genesis = frontier.genesis_block();
@ -128,4 +134,23 @@ mod tests {
let _ = frontier.engine;
}
#[test]
fn all_spec_files_valid() {
let tmp = ::std::env::temp_dir();
new_olympic(&tmp);
new_foundation(&tmp);
new_classic(&tmp);
new_expanse(&tmp);
new_kovan(&tmp);
new_ropsten(&tmp);
new_morden(&tmp);
new_frontier_test();
new_homestead_test();
new_eip150_test();
new_eip161_test();
new_transition_test();
new_mainnet_like();
new_metropolis_test();
}
}
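
A hypothetical extra test for the module above, illustrating the split introduced here: network constructors take a cache directory for the Ethash light cache, while the test-only constructors keep their zero-argument form and fall back to the temp dir via `load(None, ...)`:

#[test]
fn constructors_take_cache_dir() {
    let cache_dir = ::std::env::temp_dir();
    // Network spec: the cache directory is threaded down to EthashManager.
    let frontier = new_foundation(&cache_dir);
    assert_eq!(frontier.state_root(), "d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544".into());
    // Test spec: no argument needed.
    let _homestead = new_homestead_test();
}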


@ -32,7 +32,7 @@
//! use ethcore::miner::{Miner, MinerService};
//!
//! fn main() {
//! let miner: Miner = Miner::with_spec(&ethereum::new_foundation());
//! let miner: Miner = Miner::with_spec(&ethereum::new_foundation(&env::temp_dir()));
//! // get status
//! assert_eq!(miner.status().transactions_in_pending_queue, 0);
//!


@ -59,7 +59,7 @@ lazy_static! {
/// `native_contracts::test_contracts::ValidatorSet` provides a native wrapper for the ABi.
fn spec_fixed_to_contract() -> Spec {
let data = include_bytes!("test_validator_contract.json");
Spec::load(&data[..]).unwrap()
Spec::load(&::std::env::temp_dir(), &data[..]).unwrap()
}
// creates an account provider, filling it with accounts from all the given


@ -158,7 +158,7 @@ pub struct Spec {
genesis_state: PodState,
}
fn load_from(s: ethjson::spec::Spec) -> Result<Spec, Error> {
fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result<Spec, Error> {
let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect();
let g = Genesis::from(s.genesis);
let GenericSeal(seal_rlp) = g.seal.into();
@ -166,7 +166,7 @@ fn load_from(s: ethjson::spec::Spec) -> Result<Spec, Error> {
let mut s = Spec {
name: s.name.clone().into(),
engine: Spec::engine(s.engine, params, builtins),
engine: Spec::engine(cache_dir, s.engine, params, builtins),
data_dir: s.data_dir.unwrap_or(s.name).into(),
nodes: s.nodes.unwrap_or_else(Vec::new),
parent_hash: g.parent_hash,
@ -195,18 +195,26 @@ fn load_from(s: ethjson::spec::Spec) -> Result<Spec, Error> {
macro_rules! load_bundled {
($e:expr) => {
Spec::load(include_bytes!(concat!("../../res/", $e, ".json")) as &[u8]).expect(concat!("Chain spec ", $e, " is invalid."))
Spec::load(
&::std::env::temp_dir(),
include_bytes!(concat!("../../res/", $e, ".json")) as &[u8]
).expect(concat!("Chain spec ", $e, " is invalid."))
};
}
impl Spec {
/// Convert engine spec into a arc'd Engine of the right underlying type.
/// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
fn engine(engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Arc<Engine> {
fn engine<T: AsRef<Path>>(
cache_dir: T,
engine_spec: ethjson::spec::Engine,
params: CommonParams,
builtins: BTreeMap<Address, Builtin>,
) -> Arc<Engine> {
match engine_spec {
ethjson::spec::Engine::Null => Arc::new(NullEngine::new(params, builtins)),
ethjson::spec::Engine::InstantSeal(instant) => Arc::new(InstantSeal::new(params, instant.params.registrar.map_or_else(Address::new, Into::into), builtins)),
ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(params, From::from(ethash.params), builtins)),
ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(cache_dir, params, From::from(ethash.params), builtins)),
ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Failed to start AuthorityRound consensus engine."),
ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(params, From::from(tendermint.params), builtins).expect("Failed to start the Tendermint consensus engine."),
@ -397,13 +405,13 @@ impl Spec {
/// Loads spec from json file. Provide factories for executing contracts and ensuring
/// storage goes to the right place.
pub fn load<R>(reader: R) -> Result<Self, String> where R: Read {
pub fn load<T: AsRef<Path>, R>(cache_dir: T, reader: R) -> Result<Self, String> where R: Read {
fn fmt<F: ::std::fmt::Display>(f: F) -> String {
format!("Spec json is invalid: {}", f)
}
ethjson::spec::Spec::load(reader).map_err(fmt)
.and_then(|x| load_from(x).map_err(fmt))
.and_then(|x| load_from(cache_dir, x).map_err(fmt))
}
/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus.
@ -453,7 +461,7 @@ mod tests {
// https://github.com/paritytech/parity/issues/1840
#[test]
fn test_load_empty() {
assert!(Spec::load(&[] as &[u8]).is_err());
assert!(Spec::load(::std::env::temp_dir(), &[] as &[u8]).is_err());
}
#[test]


@ -52,7 +52,7 @@ fn imports_from_empty() {
#[test]
fn should_return_registrar() {
let dir = RandomTempPath::new();
let spec = ethereum::new_morden();
let spec = ethereum::new_morden(&dir);
let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let client_db = Arc::new(Database::open(&db_config, dir.as_path().to_str().unwrap()).unwrap());


@ -71,7 +71,7 @@ pub fn execute(cmd: AccountCmd) -> Result<String, String> {
}
fn keys_dir(path: String, spec: SpecType) -> Result<RootDiskDirectory, String> {
let spec = spec.spec()?;
let spec = spec.spec(&::std::env::temp_dir())?;
let mut path = PathBuf::from(&path);
path.push(spec.data_dir);
RootDiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e))


@ -148,7 +148,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
let timer = Instant::now();
// load spec file
let spec = cmd.spec.spec()?;
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
@ -320,7 +320,7 @@ fn start_client(
) -> Result<ClientService, String> {
// load spec file
let spec = spec.spec()?;
let spec = spec.spec(&dirs.cache)?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
@ -517,7 +517,7 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> {
}
pub fn kill_db(cmd: KillBlockchain) -> Result<(), String> {
let spec = cmd.spec.spec()?;
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
let genesis_hash = spec.genesis_header().hash();
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir);
let user_defaults_path = db_dirs.user_defaults_path();


@ -34,7 +34,7 @@ use rpc::{IpcConfiguration, HttpConfiguration, WsConfiguration, UiConfiguration}
use rpc_apis::ApiSet;
use parity_rpc::NetworkSettings;
use cache::CacheConfig;
use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_for_db,
use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_and_local,
geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address, to_gas_limit, to_queue_strategy};
use params::{SpecType, ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, Pruning, Switch};
use ethcore_logger::Config as LogConfig;
@ -893,14 +893,20 @@ impl Configuration {
let local_path = default_local_path();
let base_path = self.args.flag_base_path.as_ref().or_else(|| self.args.flag_datadir.as_ref()).map_or_else(|| default_data_path(), |s| s.clone());
let data_path = replace_home("", &base_path);
let base_db_path = if self.args.flag_base_path.is_some() && self.args.flag_db_path.is_none() {
// If base_path is set and db_path is not we default to base path subdir instead of LOCAL.
let is_using_base_path = self.args.flag_base_path.is_some();
// If base_path is set and db_path is not we default to base path subdir instead of LOCAL.
let base_db_path = if is_using_base_path && self.args.flag_db_path.is_none() {
"$BASE/chains"
} else {
self.args.flag_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s)
};
let cache_path = if is_using_base_path {
"$BASE/cache".into()
} else {
replace_home_and_local(&data_path, &local_path, &dir::CACHE_PATH)
};
let db_path = replace_home_for_db(&data_path, &local_path, &base_db_path);
let db_path = replace_home_and_local(&data_path, &local_path, &base_db_path);
let keys_path = replace_home(&data_path, &self.args.flag_keys_path);
let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path);
let secretstore_path = replace_home(&data_path, &self.args.flag_secretstore_path);
@ -924,6 +930,7 @@ impl Configuration {
Directories {
keys: keys_path,
base: data_path,
cache: cache_path,
db: db_path,
dapps: dapps_path,
signer: ui_path,


@ -18,7 +18,7 @@ use std::fs;
use std::path::{PathBuf, Path};
use util::{H64, H256};
use util::journaldb::Algorithm;
use helpers::{replace_home, replace_home_for_db};
use helpers::{replace_home, replace_home_and_local};
use app_dirs::{AppInfo, get_app_root, AppDataType};
#[cfg(target_os = "macos")] const AUTHOR: &'static str = "Parity";
@ -34,6 +34,9 @@ use app_dirs::{AppInfo, get_app_root, AppDataType};
#[cfg(target_os = "windows")] pub const CHAINS_PATH: &'static str = "$LOCAL/chains";
#[cfg(not(target_os = "windows"))] pub const CHAINS_PATH: &'static str = "$BASE/chains";
#[cfg(target_os = "windows")] pub const CACHE_PATH: &'static str = "$LOCAL/cache";
#[cfg(not(target_os = "windows"))] pub const CACHE_PATH: &'static str = "$BASE/cache";
// this const is irrelevent cause we do have migrations now,
// but we still use it for backwards compatibility
const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
@ -42,6 +45,7 @@ const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
pub struct Directories {
pub base: String,
pub db: String,
pub cache: String,
pub keys: String,
pub signer: String,
pub dapps: String,
@ -54,7 +58,8 @@ impl Default for Directories {
let local_dir = default_local_path();
Directories {
base: replace_home(&data_dir, "$BASE"),
db: replace_home_for_db(&data_dir, &local_dir, CHAINS_PATH),
db: replace_home_and_local(&data_dir, &local_dir, CHAINS_PATH),
cache: replace_home_and_local(&data_dir, &local_dir, CACHE_PATH),
keys: replace_home(&data_dir, "$BASE/keys"),
signer: replace_home(&data_dir, "$BASE/signer"),
dapps: replace_home(&data_dir, "$BASE/dapps"),
@ -67,6 +72,7 @@ impl Directories {
pub fn create_dirs(&self, dapps_enabled: bool, signer_enabled: bool, secretstore_enabled: bool) -> Result<(), String> {
fs::create_dir_all(&self.base).map_err(|e| e.to_string())?;
fs::create_dir_all(&self.db).map_err(|e| e.to_string())?;
fs::create_dir_all(&self.cache).map_err(|e| e.to_string())?;
fs::create_dir_all(&self.keys).map_err(|e| e.to_string())?;
if signer_enabled {
fs::create_dir_all(&self.signer).map_err(|e| e.to_string())?;
@ -231,7 +237,7 @@ pub fn default_hypervisor_path() -> String {
#[cfg(test)]
mod tests {
use super::Directories;
use helpers::{replace_home, replace_home_for_db};
use helpers::{replace_home, replace_home_and_local};
#[test]
fn test_default_directories() {
@ -239,10 +245,14 @@ mod tests {
let local_dir = super::default_local_path();
let expected = Directories {
base: replace_home(&data_dir, "$BASE"),
db: replace_home_for_db(&data_dir, &local_dir,
db: replace_home_and_local(&data_dir, &local_dir,
if cfg!(target_os = "windows") { "$LOCAL/chains" }
else { "$BASE/chains" }
),
cache: replace_home_and_local(&data_dir, &local_dir,
if cfg!(target_os = "windows") { "$LOCAL/cache" }
else { "$BASE/cache" }
),
keys: replace_home(&data_dir, "$BASE/keys"),
signer: replace_home(&data_dir, "$BASE/signer"),
dapps: replace_home(&data_dir, "$BASE/dapps"),


@ -140,7 +140,7 @@ pub fn replace_home(base: &str, arg: &str) -> String {
r.replace("/", &::std::path::MAIN_SEPARATOR.to_string())
}
pub fn replace_home_for_db(base: &str, local: &str, arg: &str) -> String {
pub fn replace_home_and_local(base: &str, local: &str, arg: &str) -> String {
let r = replace_home(base, arg);
r.replace("$LOCAL", local)
}
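
The renamed helper expands "$BASE" (and normalises path separators) via replace_home, then substitutes "$LOCAL". A hypothetical test of that behaviour, assuming replace_home leaves "$LOCAL" untouched; the directory strings are made up:

#[test]
fn test_replace_home_and_local() {
    let base = "/tmp/parity-base";    // illustrative
    let local = "/tmp/parity-local";  // illustrative
    // "$LOCAL" is expanded with the local (cache) directory, e.g. for dir::CACHE_PATH.
    assert_eq!(
        replace_home_and_local(base, local, "$LOCAL/cache"),
        format!("{}{}cache", local, ::std::path::MAIN_SEPARATOR)
    );
}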


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::{str, fs, fmt};
use std::{str, fs, fmt, path};
use std::time::Duration;
use util::{Address, U256, version_data};
use util::journaldb::Algorithm;
@ -79,19 +79,20 @@ impl fmt::Display for SpecType {
}
impl SpecType {
pub fn spec(&self) -> Result<Spec, String> {
pub fn spec<T: AsRef<path::Path>>(&self, cache_dir: T) -> Result<Spec, String> {
let cache_dir = cache_dir.as_ref();
match *self {
SpecType::Foundation => Ok(ethereum::new_foundation()),
SpecType::Morden => Ok(ethereum::new_morden()),
SpecType::Ropsten => Ok(ethereum::new_ropsten()),
SpecType::Olympic => Ok(ethereum::new_olympic()),
SpecType::Classic => Ok(ethereum::new_classic()),
SpecType::Expanse => Ok(ethereum::new_expanse()),
SpecType::Kovan => Ok(ethereum::new_kovan()),
SpecType::Foundation => Ok(ethereum::new_foundation(cache_dir)),
SpecType::Morden => Ok(ethereum::new_morden(cache_dir)),
SpecType::Ropsten => Ok(ethereum::new_ropsten(cache_dir)),
SpecType::Olympic => Ok(ethereum::new_olympic(cache_dir)),
SpecType::Classic => Ok(ethereum::new_classic(cache_dir)),
SpecType::Expanse => Ok(ethereum::new_expanse(cache_dir)),
SpecType::Kovan => Ok(ethereum::new_kovan(cache_dir)),
SpecType::Dev => Ok(Spec::new_instant()),
SpecType::Custom(ref filename) => {
let file = fs::File::open(filename).map_err(|_| "Could not load specification file.")?;
Spec::load(file)
let file = fs::File::open(filename).map_err(|e| format!("Could not load specification file at {}: {}", filename, e))?;
Spec::load(cache_dir, file)
}
}
}
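
Callers in the parity binary now pass the configured cache directory, as in the `cmd.spec.spec(&cmd.dirs.cache)?` call sites above. A hypothetical helper showing the types involved (module paths assumed from the surrounding code):

use dir::Directories;
use ethcore::spec::Spec;
use params::SpecType;

fn open_spec(spec: &SpecType, dirs: &Directories) -> Result<Spec, String> {
    // `dirs.cache` is a String; &String satisfies the AsRef<Path> bound on spec().
    spec.spec(&dirs.cache)
}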


@ -168,7 +168,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
use util::RwLock;
// load spec
let spec = cmd.spec.spec()?;
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
@ -352,7 +352,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
}
// load spec
let spec = cmd.spec.spec()?;
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();


@ -133,7 +133,7 @@ impl SnapshotCommand {
// shared portion of snapshot commands: start the client service
fn start_service(self) -> Result<ClientService, String> {
// load spec file
let spec = self.spec.spec()?;
let spec = self.spec.spec(&self.dirs.cache)?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();


@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! rpc integration tests.
use std::env;
use std::sync::Arc;
use std::time::Duration;
@ -318,7 +319,7 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{
#[test]
fn eth_transaction_count() {
let secret = "8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2".parse().unwrap();
let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC).expect("invalid chain spec"));
let tester = EthTester::from_spec(Spec::load(&env::temp_dir(), TRANSACTION_COUNT_SPEC).expect("invalid chain spec"));
let address = tester.accounts.insert_account(secret, "").unwrap();
tester.accounts.unlock_account_permanently(address, "".into()).unwrap();
@ -444,7 +445,7 @@ fn verify_transaction_counts(name: String, chain: BlockChain) {
#[test]
fn starting_nonce_test() {
let tester = EthTester::from_spec(Spec::load(POSITIVE_NONCE_SPEC).expect("invalid chain spec"));
let tester = EthTester::from_spec(Spec::load(&env::temp_dir(), POSITIVE_NONCE_SPEC).expect("invalid chain spec"));
let address = Address::from(10);
let sample = tester.handler.handle_request_sync(&(r#"