New data paths with upgrade

arkpar 2016-12-12 16:51:07 +01:00
parent 95af942fc9
commit 6601fde328
23 changed files with 259 additions and 87 deletions

View File

@ -1,6 +1,6 @@
{
"name": "Ethereum Classic",
"forkName": "classic",
"dataDir": "classic",
"engine": {
"Ethash": {
"params": {

View File

@ -1,6 +1,6 @@
{
"name": "Expanse",
"forkName": "expanse",
"dataDir": "expanse",
"engine": {
"Ethash": {
"params": {

View File

@ -1,5 +1,6 @@
{
"name": "Frontier/Homestead",
"dataDir": "ethereum",
"engine": {
"Ethash": {
"params": {

View File

@ -1,5 +1,6 @@
{
"name": "Morden",
"dataDir": "test",
"engine": {
"Ethash": {
"params": {

View File

@ -1,5 +1,6 @@
{
"name": "Ropsten",
"dataDir": "test",
"engine": {
"Ethash": {
"params": {

View File

@ -77,9 +77,6 @@ impl ClientService {
panic_handler.forward_from(&io_service);
info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name()));
if spec.fork_name.is_some() {
warn!("Your chain is an alternative fork. {}", Colour::Red.bold().paint("TRANSACTIONS MAY BE REPLAYED ON THE MAINNET!"));
}
let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);

View File

@ -67,7 +67,7 @@ pub struct Spec {
/// What engine are we using for this?
pub engine: Arc<Engine>,
/// The fork identifier for this chain. Only needed to distinguish two chains sharing the same genesis.
pub fork_name: Option<String>,
pub data_dir: String,
/// Known nodes on the network in enode format.
pub nodes: Vec<String>,
@ -110,10 +110,10 @@ impl From<ethjson::spec::Spec> for Spec {
let seal: GenericSeal = g.seal.into();
let params = CommonParams::from(s.params);
Spec {
name: s.name.into(),
name: s.name.clone().into(),
params: params.clone(),
engine: Spec::engine(s.engine, params, builtins),
fork_name: s.fork_name.map(Into::into),
data_dir: s.data_dir.unwrap_or(s.name).into(),
nodes: s.nodes.unwrap_or_else(Vec::new),
parent_hash: g.parent_hash,
transactions_root: g.transactions_root,
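The new `data_dir` field falls back to the chain name when a spec file omits `"dataDir"`. A minimal sketch of that fallback, using plain stand-in types rather than the real `ethjson::spec::Spec` (the names here are invented for illustration):

// Stand-in for the JSON spec: only the two fields involved in the fallback.
struct JsonSpec {
    name: String,
    data_dir: Option<String>, // deserialized from "dataDir"
}

// Mirrors `data_dir: s.data_dir.unwrap_or(s.name).into()` above.
fn resolve_data_dir(s: JsonSpec) -> String {
    s.data_dir.unwrap_or(s.name)
}

fn main() {
    let morden = JsonSpec { name: "Morden".into(), data_dir: Some("test".into()) };
    let custom = JsonSpec { name: "MyChain".into(), data_dir: None };
    assert_eq!(resolve_data_dir(morden), "test");
    assert_eq!(resolve_data_dir(custom), "MyChain"); // no dataDir: reuse the name
}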

View File

@ -27,8 +27,8 @@ pub struct Spec {
/// Spec name.
pub name: String,
/// Special fork name.
#[serde(rename="forkName")]
pub fork_name: Option<String>,
#[serde(rename="dataDir")]
pub data_dir: Option<String>,
/// Engine.
pub engine: Engine,
/// Spec params.
@ -57,6 +57,7 @@ mod tests {
fn spec_deserialization() {
let s = r#"{
"name": "Morden",
"dataDir": "morden",
"engine": {
"Ethash": {
"params": {

View File

@ -14,23 +14,32 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::path::PathBuf;
use ethcore::ethstore::{EthStore, SecretStore, import_accounts, read_geth_accounts};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
use helpers::{password_prompt, password_from_file};
use params::SpecType;
#[derive(Debug, PartialEq)]
pub enum AccountCmd {
New(NewAccount),
List(String),
List(ListAccounts),
Import(ImportAccounts),
ImportFromGeth(ImportFromGethAccounts)
}
#[derive(Debug, PartialEq)]
pub struct ListAccounts {
pub path: String,
pub spec: SpecType,
}
#[derive(Debug, PartialEq)]
pub struct NewAccount {
pub iterations: u32,
pub path: String,
pub spec: SpecType,
pub password_file: Option<String>,
}
@ -38,6 +47,7 @@ pub struct NewAccount {
pub struct ImportAccounts {
pub from: Vec<String>,
pub to: String,
pub spec: SpecType,
}
/// Parameters for geth accounts' import
@ -47,18 +57,22 @@ pub struct ImportFromGethAccounts {
pub testnet: bool,
/// directory to import accounts to
pub to: String,
pub spec: SpecType,
}
pub fn execute(cmd: AccountCmd) -> Result<String, String> {
match cmd {
AccountCmd::New(new_cmd) => new(new_cmd),
AccountCmd::List(path) => list(path),
AccountCmd::List(list_cmd) => list(list_cmd),
AccountCmd::Import(import_cmd) => import(import_cmd),
AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd)
}
}
fn keys_dir(path: String) -> Result<DiskDirectory, String> {
fn keys_dir(path: String, spec: SpecType) -> Result<DiskDirectory, String> {
let spec = try!(spec.spec());
let mut path = PathBuf::from(&path);
path.push(spec.data_dir);
DiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e))
}
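With the extra `SpecType` argument, `keys_dir` becomes chain-aware: the spec's `data_dir` is appended to the configured keys path before the directory is opened. A rough sketch of just the path arithmetic (the helper name and paths are invented; the real function also resolves the `SpecType` and calls `DiskDirectory::create`):

use std::path::PathBuf;

// Illustrative only: `base` is what `dirs.keys` resolves to, `data_dir` comes from the spec.
fn per_chain_keys_path(base: &str, data_dir: &str) -> PathBuf {
    let mut path = PathBuf::from(base);
    path.push(data_dir);
    path
}

fn main() {
    // e.g. `parity --chain classic account list` with the default keys path:
    let p = per_chain_keys_path("/home/user/.parity/keys", "classic");
    assert_eq!(p, PathBuf::from("/home/user/.parity/keys/classic"));
}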
@ -75,15 +89,15 @@ fn new(n: NewAccount) -> Result<String, String> {
None => try!(password_prompt()),
};
let dir = Box::new(try!(keys_dir(n.path)));
let dir = Box::new(try!(keys_dir(n.path, n.spec)));
let secret_store = Box::new(try!(secret_store(dir, Some(n.iterations))));
let acc_provider = AccountProvider::new(secret_store);
let new_account = try!(acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e)));
Ok(format!("{:?}", new_account))
}
fn list(path: String) -> Result<String, String> {
let dir = Box::new(try!(keys_dir(path)));
fn list(list_cmd: ListAccounts) -> Result<String, String> {
let dir = Box::new(try!(keys_dir(list_cmd.path, list_cmd.spec)));
let secret_store = Box::new(try!(secret_store(dir, None)));
let acc_provider = AccountProvider::new(secret_store);
let accounts = acc_provider.accounts();
@ -96,7 +110,7 @@ fn list(path: String) -> Result<String, String> {
}
fn import(i: ImportAccounts) -> Result<String, String> {
let to = try!(keys_dir(i.to));
let to = try!(keys_dir(i.to, i.spec));
let mut imported = 0;
for path in &i.from {
let from = DiskDirectory::at(path);
@ -109,7 +123,7 @@ fn import_geth(i: ImportFromGethAccounts) -> Result<String, String> {
use std::io::ErrorKind;
use ethcore::ethstore::Error;
let dir = Box::new(try!(keys_dir(i.to)));
let dir = Box::new(try!(keys_dir(i.to, i.spec)));
let secret_store = Box::new(try!(secret_store(dir, None)));
let geth_accounts = read_geth_accounts(i.testnet);
match secret_store.import_geth_accounts(geth_accounts, i.testnet) {

View File

@ -150,7 +150,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -174,7 +174,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// prepare client config
let mut client_config = to_client_config(
@ -321,7 +321,7 @@ fn start_client(
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -345,7 +345,7 @@ fn start_client(
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// prepare client config
let client_config = to_client_config(&cache_config, Mode::Active, tracing, fat_db, compaction, wal, VMType::default(), "".into(), algorithm, pruning_history, true);

View File

@ -81,7 +81,7 @@ usage! {
flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(),
flag_chain: String = "homestead", or |c: &Config| otry!(c.parity).chain.clone(),
flag_db_path: String = "$HOME/.parity", or |c: &Config| otry!(c.parity).db_path.clone(),
flag_keys_path: String = "$HOME/.parity/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
flag_keys_path: String = "$DATA/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(),
// -- Account Options
@ -100,7 +100,7 @@ usage! {
or |c: &Config| otry!(c.ui).port.clone(),
flag_ui_interface: String = "local",
or |c: &Config| otry!(c.ui).interface.clone(),
flag_ui_path: String = "$HOME/.parity/signer",
flag_ui_path: String = "$DATA/signer",
or |c: &Config| otry!(c.ui).path.clone(),
// NOTE [todr] For security reasons don't put this to config files
flag_ui_no_validation: bool = false, or |_| None,
@ -156,7 +156,7 @@ usage! {
// IPC
flag_no_ipc: bool = false,
or |c: &Config| otry!(c.ipc).disable.clone(),
flag_ipc_path: String = "$HOME/.parity/jsonrpc.ipc",
flag_ipc_path: String = "$DATA/jsonrpc.ipc",
or |c: &Config| otry!(c.ipc).path.clone(),
flag_ipc_apis: String = "web3,eth,net,parity,parity_accounts,traces,rpc",
or |c: &Config| otry!(c.ipc).apis.clone().map(|vec| vec.join(",")),
@ -170,7 +170,7 @@ usage! {
or |c: &Config| otry!(c.dapps).interface.clone(),
flag_dapps_hosts: String = "none",
or |c: &Config| otry!(c.dapps).hosts.clone().map(|vec| vec.join(",")),
flag_dapps_path: String = "$HOME/.parity/dapps",
flag_dapps_path: String = "$DATA/dapps",
or |c: &Config| otry!(c.dapps).path.clone(),
flag_dapps_user: Option<String> = None,
or |c: &Config| otry!(c.dapps).user.clone().map(Some),
@ -271,7 +271,7 @@ usage! {
or |c: &Config| otry!(c.vm).jit.clone(),
// -- Miscellaneous Options
flag_config: String = "$HOME/.parity/config.toml", or |_| None,
flag_config: String = "$DATA/config.toml", or |_| None,
flag_logging: Option<String> = None,
or |c: &Config| otry!(c.misc).logging.clone().map(Some),
flag_log_file: Option<String> = None,

View File

@ -145,7 +145,7 @@ macro_rules! usage {
}
let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config);
let config_file = replace_home(&config_file);
let config_file = replace_home("", &config_file);
let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) {
// Load config file
(Ok(mut file), _) => {

View File

@ -40,7 +40,7 @@ use signer::{Configuration as SignerConfiguration};
use run::RunCmd;
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, ExportState, DataFormat};
use presale::ImportWallet;
use account::{AccountCmd, NewAccount, ImportAccounts, ImportFromGethAccounts};
use account::{AccountCmd, NewAccount, ListAccounts, ImportAccounts, ImportFromGethAccounts};
use snapshot::{self, SnapshotCommand};
#[derive(Debug, PartialEq)]
@ -112,15 +112,21 @@ impl Configuration {
let new_acc = NewAccount {
iterations: self.args.flag_keys_iterations,
path: dirs.keys,
spec: spec,
password_file: self.args.flag_password.first().cloned(),
};
AccountCmd::New(new_acc)
} else if self.args.cmd_list {
AccountCmd::List(dirs.keys)
let list_acc = ListAccounts {
path: dirs.keys,
spec: spec,
};
AccountCmd::List(list_acc)
} else if self.args.cmd_import {
let import_acc = ImportAccounts {
from: self.args.arg_path.clone(),
to: dirs.keys,
spec: spec,
};
AccountCmd::Import(import_acc)
} else {
@ -130,6 +136,7 @@ impl Configuration {
} else if self.args.flag_import_geth_keys {
let account_cmd = AccountCmd::ImportFromGeth(
ImportFromGethAccounts {
spec: spec,
to: dirs.keys,
testnet: self.args.flag_testnet
}
@ -139,6 +146,7 @@ impl Configuration {
let presale_cmd = ImportWallet {
iterations: self.args.flag_keys_iterations,
path: dirs.keys,
spec: spec,
wallet_path: self.args.arg_path.first().unwrap().clone(),
password_file: self.args.flag_password.first().cloned(),
};
@ -530,7 +538,7 @@ impl Configuration {
ret.snapshot_peers = self.snapshot_peers();
ret.allow_ips = try!(self.allow_ips());
ret.max_pending_peers = self.max_pending_peers();
let mut net_path = PathBuf::from(self.directories().db);
let mut net_path = PathBuf::from(self.directories().data);
net_path.push("network");
ret.config_path = Some(net_path.to_str().unwrap().to_owned());
ret.reserved_nodes = try!(self.init_reserved_nodes());
@ -624,18 +632,11 @@ impl Configuration {
fn directories(&self) -> Directories {
use util::path;
let db_path = replace_home(self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));
let data_path = replace_home("", self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));
let keys_path = replace_home(
if self.args.flag_testnet {
"$HOME/.parity/testnet_keys"
} else {
&self.args.flag_keys_path
}
);
let dapps_path = replace_home(&self.args.flag_dapps_path);
let ui_path = replace_home(&self.args.flag_ui_path);
let keys_path = replace_home(&data_path, &self.args.flag_keys_path);
let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path);
let ui_path = replace_home(&data_path, &self.args.flag_ui_path);
if self.args.flag_geth && !cfg!(windows) {
let geth_root = if self.args.flag_testnet { path::ethereum::test() } else { path::ethereum::default() };
@ -644,7 +645,7 @@ impl Configuration {
}
if cfg!(feature = "ipc") && !cfg!(feature = "windows") {
let mut path_buf = PathBuf::from(db_path.clone());
let mut path_buf = PathBuf::from(data_path.clone());
path_buf.push("ipc");
let ipc_path = path_buf.to_str().unwrap();
::std::fs::create_dir_all(ipc_path).unwrap_or_else(
@ -654,7 +655,7 @@ impl Configuration {
Directories {
keys: keys_path,
db: db_path,
data: data_path,
dapps: dapps_path,
signer: ui_path,
}
@ -664,7 +665,7 @@ impl Configuration {
if self.args.flag_geth {
geth_ipc_path(self.args.flag_testnet)
} else {
parity_ipc_path(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()))
parity_ipc_path(&self.directories().data, &self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()))
}
}
@ -764,7 +765,7 @@ mod tests {
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::New(NewAccount {
iterations: 10240,
path: replace_home("$HOME/.parity/keys"),
path: replace_home("", "$HOME/.parity/keys"),
password_file: None,
})));
}
@ -774,7 +775,7 @@ mod tests {
let args = vec!["parity", "account", "list"];
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(
AccountCmd::List(replace_home("$HOME/.parity/keys")),
AccountCmd::List(replace_home("", "$HOME/.parity/keys")),
));
}
@ -784,7 +785,7 @@ mod tests {
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::Import(ImportAccounts {
from: vec!["my_dir".into(), "another_dir".into()],
to: replace_home("$HOME/.parity/keys"),
to: replace_home("", "$HOME/.parity/keys"),
})));
}
@ -794,7 +795,7 @@ mod tests {
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::ImportPresaleWallet(ImportWallet {
iterations: 10240,
path: replace_home("$HOME/.parity/keys"),
path: replace_home("", "$HOME/.parity/keys"),
wallet_path: "my_wallet.json".into(),
password_file: Some("pwd".into()),
}));
@ -895,7 +896,7 @@ mod tests {
fn test_command_signer_new_token() {
let args = vec!["parity", "signer", "new-token"];
let conf = parse(&args);
let expected = replace_home("$HOME/.parity/signer");
let expected = replace_home("", "$HOME/.parity/signer");
assert_eq!(conf.into_command().unwrap().cmd, Cmd::SignerToken(SignerConfiguration {
enabled: true,
signer_path: expected,

View File

@ -41,7 +41,7 @@ impl Default for Configuration {
hosts: Some(Vec::new()),
user: None,
pass: None,
dapps_path: replace_home("$HOME/.parity/dapps"),
dapps_path: replace_home("", "$HOME/.parity/dapps"),
}
}
}

View File

@ -26,7 +26,7 @@ const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
#[derive(Debug, PartialEq)]
pub struct Directories {
pub db: String,
pub data: String,
pub keys: String,
pub signer: String,
pub dapps: String,
@ -35,17 +35,17 @@ pub struct Directories {
impl Default for Directories {
fn default() -> Self {
Directories {
db: replace_home("$HOME/.parity"),
keys: replace_home("$HOME/.parity/keys"),
signer: replace_home("$HOME/.parity/signer"),
dapps: replace_home("$HOME/.parity/dapps"),
data: replace_home("", "$HOME/.parity"),
keys: replace_home("", "$HOME/.parity/keys"),
signer: replace_home("", "$HOME/.parity/signer"),
dapps: replace_home("", "$HOME/.parity/dapps"),
}
}
}
impl Directories {
pub fn create_dirs(&self, dapps_enabled: bool, signer_enabled: bool) -> Result<(), String> {
try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string()));
try!(fs::create_dir_all(&self.data).map_err(|e| e.to_string()));
try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string()));
if signer_enabled {
try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string()));
@ -57,20 +57,38 @@ impl Directories {
}
/// Database paths.
pub fn database(&self, genesis_hash: H256, fork_name: Option<String>) -> DatabaseDirectories {
pub fn database(&self, genesis_hash: H256, fork_name: Option<String>, spec_name: String) -> DatabaseDirectories {
DatabaseDirectories {
path: self.db.clone(),
path: self.data.clone(),
genesis_hash: genesis_hash,
fork_name: fork_name,
spec_name: spec_name,
}
}
/// Get the ipc sockets path
pub fn ipc_path(&self) -> PathBuf {
let mut dir = Path::new(&self.db).to_path_buf();
let mut dir = Path::new(&self.data).to_path_buf();
dir.push("ipc");
dir
}
// TODO: remove in 1.7
pub fn legacy_keys_path(&self, testnet: bool) -> PathBuf {
let mut dir = Path::new(&self.data).to_path_buf();
if testnet {
dir.push("testnet_keys");
} else {
dir.push("keys");
}
dir
}
pub fn keys_path(&self, spec_name: &str) -> PathBuf {
let mut dir = PathBuf::from(&self.keys);
dir.push(spec_name);
dir
}
}
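`legacy_keys_path` points at the old flat key store under the data dir, while `keys_path` gives the new per-chain location under the keys dir; the run.rs changes further down use the pair to migrate keys on startup. A worked example with illustrative paths and a cut-down stand-in for `Directories`:

use std::path::{Path, PathBuf};

// Only the two fields these helpers touch; values are illustrative.
struct Dirs { data: String, keys: String }

impl Dirs {
    // Old location: <data>/keys, or <data>/testnet_keys for --testnet.
    fn legacy_keys_path(&self, testnet: bool) -> PathBuf {
        let mut dir = Path::new(&self.data).to_path_buf();
        dir.push(if testnet { "testnet_keys" } else { "keys" });
        dir
    }
    // New location: <keys>/<spec data_dir>.
    fn keys_path(&self, spec_name: &str) -> PathBuf {
        let mut dir = PathBuf::from(&self.keys);
        dir.push(spec_name);
        dir
    }
}

fn main() {
    let d = Dirs { data: "/home/user/.parity".into(), keys: "/home/user/.parity/keys".into() };
    assert_eq!(d.legacy_keys_path(false), PathBuf::from("/home/user/.parity/keys"));
    assert_eq!(d.keys_path("ethereum"), PathBuf::from("/home/user/.parity/keys/ethereum"));
}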
#[derive(Debug, PartialEq)]
@ -78,47 +96,93 @@ pub struct DatabaseDirectories {
pub path: String,
pub genesis_hash: H256,
pub fork_name: Option<String>,
pub spec_name: String,
}
impl DatabaseDirectories {
/// Base DB directory for the given fork.
pub fn fork_path(&self) -> PathBuf {
// TODO: remove in 1.7
pub fn legacy_fork_path(&self) -> PathBuf {
let mut dir = Path::new(&self.path).to_path_buf();
dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
dir
}
/// Get the root path for database
pub fn version_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.fork_path();
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
pub fn spec_root_path(&self) -> PathBuf {
let mut dir = Path::new(&self.path).to_path_buf();
dir.push("chains");
dir.push(&self.spec_name);
dir
}
/// Get the path for the client database, given the pruning algorithm.
pub fn client_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.version_path(pruning);
let mut dir = self.db_root_path();
dir.push(pruning.as_internal_name_str());
dir.push("db");
dir
}
pub fn db_root_path(&self) -> PathBuf {
let mut dir = self.spec_root_path();
dir.push("db");
dir.push(H64::from(self.genesis_hash).hex());
dir
}
pub fn db_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.db_root_path();
dir.push(pruning.as_internal_name_str());
dir
}
/// Get the root path for database
// TODO: remove in 1.7
pub fn legacy_version_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
dir
}
/// Get the legacy user defaults path
// TODO: remove in 1.7
pub fn legacy_user_defaults_path(&self) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push("user_defaults");
dir
}
/// Get the legacy snapshot path
// TODO: remove in 1.7
pub fn legacy_snapshot_path(&self) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push("snapshot");
dir
}
/// Get the legacy network path
// TODO: remove in 1.7
pub fn legacy_network_path(&self) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push("network");
dir
}
pub fn user_defaults_path(&self) -> PathBuf {
let mut dir = self.fork_path();
let mut dir = self.spec_root_path();
dir.push("user_defaults");
dir
}
/// Get the path for the snapshot directory given the genesis hash.
pub fn snapshot_path(&self) -> PathBuf {
let mut dir = self.fork_path();
let mut dir = self.db_root_path();
dir.push("snapshot");
dir
}
/// Get the path for the network directory.
pub fn network_path(&self) -> PathBuf {
let mut dir = self.fork_path();
let mut dir = self.spec_root_path();
dir.push("network");
dir
}
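Taken together, the new helpers put everything for a chain under <data>/chains/<spec data_dir>, with per-genesis database state one level deeper: user_defaults and network live at the spec root, while snapshot and the pruning-specific databases live under the per-genesis db root. A simplified stand-in that reproduces only the path arithmetic (the genesis prefix is written out as a plain hex string instead of `H64::from(genesis_hash).hex()`, and all values are illustrative):

use std::path::PathBuf;

struct DbDirs { data: String, spec_name: String, genesis_prefix: String }

impl DbDirs {
    // <data>/chains/<spec_name> — holds user_defaults and network.
    fn spec_root(&self) -> PathBuf {
        let mut d = PathBuf::from(&self.data);
        d.push("chains");
        d.push(&self.spec_name);
        d
    }
    // <spec root>/db/<genesis prefix> — holds snapshot and the per-pruning dirs.
    fn db_root(&self) -> PathBuf {
        let mut d = self.spec_root();
        d.push("db");
        d.push(&self.genesis_prefix);
        d
    }
    // <db root>/<pruning>, as returned by db_path(pruning) above.
    fn db(&self, pruning: &str) -> PathBuf {
        let mut d = self.db_root();
        d.push(pruning);
        d
    }
}

fn main() {
    let dirs = DbDirs {
        data: "/home/user/.parity".into(),
        spec_name: "ethereum".into(),
        genesis_prefix: "906a34e69aec8c0d".into(), // illustrative H64 hex of a genesis hash
    };
    println!("{}", dirs.spec_root().display()); // /home/user/.parity/chains/ethereum
    println!("{}", dirs.db_root().display());   // /home/user/.parity/chains/ethereum/db/906a34e69aec8c0d
    println!("{}", dirs.db("archive").display()); // .../db/906a34e69aec8c0d/archive
}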

View File

@ -24,7 +24,7 @@ use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientCo
use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
use cache::CacheConfig;
use dir::DatabaseDirectories;
use upgrade::upgrade;
use upgrade::{upgrade, upgrade_data_paths};
use migration::migrate;
use ethsync::is_valid_node_url;
@ -132,9 +132,10 @@ pub fn to_price(s: &str) -> Result<f32, String> {
}
/// Replaces `$HOME` with the home directory path and `$DATA` with the given base path.
pub fn replace_home(arg: &str) -> String {
pub fn replace_home(base: &str, arg: &str) -> String {
// the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support`
let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap());
let r = r.replace("$DATA", base );
r.replace("/", &::std::path::MAIN_SEPARATOR.to_string() )
}
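`replace_home` now takes the data-directory base as a second substitution, so CLI defaults written as `$DATA/...` expand relative to whatever `--datadir`/`--db-path` resolved to, rather than being hard-wired to `$HOME/.parity`. A small sketch of the substitution, with the home directory passed in explicitly so it does not depend on the environment (the helper name is invented and the asserts assume a Unix path separator):

// Simplified copy of the logic above; `home` stands in for the env::home_dir() lookup.
fn expand_path(home: &str, base: &str, arg: &str) -> String {
    let r = arg.replace("$HOME", home);
    let r = r.replace("$DATA", base);
    r.replace("/", &std::path::MAIN_SEPARATOR.to_string())
}

fn main() {
    let data = "/mnt/parity"; // e.g. what --db-path resolved to
    assert_eq!(expand_path("/home/user", data, "$DATA/keys"), "/mnt/parity/keys");
    assert_eq!(expand_path("/home/user", data, "$HOME/.parity"), "/home/user/.parity");
}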
@ -159,13 +160,13 @@ pub fn geth_ipc_path(testnet: bool) -> String {
}
/// Formats and returns parity ipc path.
pub fn parity_ipc_path(s: &str) -> String {
pub fn parity_ipc_path(base: &str, s: &str) -> String {
// Windows path should not be hardcoded here.
if cfg!(windows) {
return r"\\.\pipe\parity.jsonrpc".to_owned();
}
replace_home(s)
replace_home(base, s)
}
/// Validates and formats bootnodes option.
@ -187,7 +188,7 @@ pub fn to_bootnodes(bootnodes: &Option<String>) -> Result<Vec<String>, String> {
pub fn default_network_config() -> ::ethsync::NetworkConfiguration {
use ethsync::{NetworkConfiguration, AllowIP};
NetworkConfiguration {
config_path: Some(replace_home("$HOME/.parity/network")),
config_path: Some(replace_home("", "$HOME/.parity/network")),
net_config_path: None,
listen_address: Some("0.0.0.0:30303".into()),
public_address: None,
@ -261,6 +262,8 @@ pub fn execute_upgrades(
compaction_profile: CompactionProfile
) -> Result<(), String> {
upgrade_data_paths(dirs, pruning);
match upgrade(Some(&dirs.path)) {
Ok(upgrades_applied) if upgrades_applied > 0 => {
debug!("Executed {} upgrade scripts - ok", upgrades_applied);
@ -271,7 +274,7 @@ pub fn execute_upgrades(
_ => {},
}
let client_path = dirs.version_path(pruning);
let client_path = dirs.db_path(pruning);
migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
}

View File

@ -76,6 +76,14 @@ impl SpecType {
}
}
}
pub fn legacy_fork_name(&self) -> Option<String> {
match *self {
SpecType::Classic => Some("classic".to_owned()),
SpecType::Expanse => Some("expanse".to_owned()),
_ => None,
}
}
}
#[derive(Debug, PartialEq)]

View File

@ -18,11 +18,13 @@ use ethcore::ethstore::{PresaleWallet, EthStore};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
use helpers::{password_prompt, password_from_file};
use params::SpecType;
#[derive(Debug, PartialEq)]
pub struct ImportWallet {
pub iterations: u32,
pub path: String,
pub spec: SpecType,
pub wallet_path: String,
pub password_file: Option<String>,
}

View File

@ -60,7 +60,7 @@ impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"),
socket_addr: parity_ipc_path("", "$HOME/.parity/jsonrpc.ipc"),
apis: ApiSet::IpcContext,
}
}

View File

@ -41,6 +41,7 @@ use params::{
tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
};
use helpers::{to_client_config, execute_upgrades, passwords_from_files};
use upgrade::upgrade_key_location;
use dir::Directories;
use cache::CacheConfig;
use user_defaults::UserDefaults;
@ -139,7 +140,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -166,7 +167,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// run in daemon mode
if let Some(pid_file) = cmd.daemon {
@ -217,7 +218,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
let passwords = try!(passwords_from_files(&cmd.acc_conf.password_files));
// prepare account provider
let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf, &passwords)));
let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)));
// let the Engine access the accounts
spec.engine.register_account_provider(account_provider.clone());
@ -449,11 +450,13 @@ fn daemonize(_pid_file: String) -> Result<(), String> {
Err("daemon is no supported on windows".into())
}
fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig, passwords: &[String]) -> Result<AccountProvider, String> {
fn prepare_account_provider(dirs: &Directories, data_dir: &str, cfg: AccountsConfig, passwords: &[String]) -> Result<AccountProvider, String> {
use ethcore::ethstore::EthStore;
use ethcore::ethstore::dir::DiskDirectory;
let dir = Box::new(try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e))));
let path = dirs.keys_path(data_dir);
upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path);
let dir = Box::new(try!(DiskDirectory::create(&path).map_err(|e| format!("Could not open keys directory: {}", e))));
let account_service = AccountProvider::new(Box::new(
try!(EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e)))
));

View File

@ -42,7 +42,7 @@ impl Default for Configuration {
enabled: true,
port: 8180,
interface: "127.0.0.1".into(),
signer_path: replace_home("$HOME/.parity/signer"),
signer_path: replace_home("", "$HOME/.parity/signer"),
skip_origin_validation: false,
}
}

View File

@ -143,7 +143,7 @@ impl SnapshotCommand {
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = self.dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -167,7 +167,7 @@ impl SnapshotCommand {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// prepare client config
let client_config = to_client_config(&self.cache_config, Mode::Active, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm, self.pruning_history, true);

View File

@ -18,10 +18,13 @@
use semver::Version;
use std::collections::*;
use std::fs::{File, create_dir_all};
use std::fs::{self, File, create_dir_all};
use std::env;
use std::io;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::path::{PathBuf, Path};
use dir::DatabaseDirectories;
use util::journaldb::Algorithm;
#[cfg_attr(feature="dev", allow(enum_variant_names))]
#[derive(Debug)]
@ -126,3 +129,76 @@ pub fn upgrade(db_path: Option<&str>) -> Result<usize, Error> {
upgrade_from_version(ver)
})
}
fn file_exists(path: &Path) -> bool {
match fs::metadata(&path) {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => false,
_ => true,
}
}
pub fn upgrade_key_location(from: &PathBuf, to: &PathBuf) {
match fs::create_dir_all(to).and_then(|()| fs::read_dir(from)) {
Ok(entries) => {
let files: Vec<_> = entries.filter_map(|f| f.ok().and_then(|f| if f.file_type().ok().map_or(false, |f| f.is_file()) { f.file_name().to_str().map(|s| s.to_owned()) } else { None })).collect();
let mut num: usize = 0;
for name in files {
let mut from = from.clone();
from.push(&name);
let mut to = to.clone();
to.push(&name);
if !file_exists(&to) {
if let Err(e) = fs::rename(&from, &to) {
debug!("Error upgrading key {:?}: {:?}", from, e);
} else {
num += 1;
}
} else {
debug!("Skipped upgrading key {:?}", from);
}
}
if num > 0 {
info!("Moved {} keys from {} to {}", num, from.to_string_lossy(), to.to_string_lossy());
}
},
Err(e) => {
warn!("Error moving keys from {:?} to {:?}: {:?}", from, to, e);
}
}
}
fn upgrade_dir_location(source: &PathBuf, dest: &PathBuf) {
if file_exists(&source) {
if !file_exists(&dest) {
if let Err(e) = fs::create_dir_all(&dest).and_then(|()| fs::rename(&source, &dest)) {
debug!("Skipped path {:?}:{:?}", dest, e);
} else {
info!("Moved {} to {}", source.to_string_lossy(), dest.to_string_lossy());
}
} else {
debug!("Skipped upgrading directory {:?}, Destination already exists at {:?}", source, dest);
}
}
}
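`upgrade_dir_location` applies a move-only-if-absent rule: a legacy directory is renamed into the new layout once, and anything already present at the destination is left untouched; `upgrade_data_paths` below wires this up for the database, snapshot and network directories. A throwaway sketch of that rule against the system temp dir (the helper name and demo paths are invented, and the parent directory is created explicitly here rather than via create_dir_all on the destination itself):

use std::fs;
use std::path::Path;

// Move `source` to `dest` only when `dest` does not exist yet; report whether a move happened.
fn move_if_absent(source: &Path, dest: &Path) -> std::io::Result<bool> {
    if source.exists() && !dest.exists() {
        if let Some(parent) = dest.parent() {
            fs::create_dir_all(parent)?;
        }
        fs::rename(source, dest)?;
        return Ok(true);
    }
    Ok(false)
}

fn main() -> std::io::Result<()> {
    let base = std::env::temp_dir().join("parity_upgrade_demo");
    let legacy = base.join("906a34e69aec8c0d").join("snapshot");
    let modern = base.join("chains").join("ethereum").join("db").join("906a34e69aec8c0d").join("snapshot");
    fs::create_dir_all(&legacy)?;
    println!("moved: {}", move_if_absent(&legacy, &modern)?); // true: legacy dir renamed into place
    println!("moved: {}", move_if_absent(&legacy, &modern)?); // false: source gone, destination exists
    fs::remove_dir_all(&base)?; // clean up the demo tree
    Ok(())
}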
fn upgrade_user_defaults(dirs: &DatabaseDirectories) {
let source = dirs.legacy_user_defaults_path();
let dest = dirs.user_defaults_path();
if file_exists(&source) {
if !file_exists(&dest) {
if let Err(e) = fs::rename(&source, &dest) {
debug!("Skipped upgrading user defaults {:?}:{:?}", dest, e);
}
} else {
debug!("Skipped upgrading user defaults {:?}, File exists at {:?}", source, dest);
}
}
}
pub fn upgrade_data_paths(dirs: &DatabaseDirectories, pruning: Algorithm) {
upgrade_dir_location(&dirs.legacy_version_path(pruning), &dirs.db_path(pruning));
upgrade_dir_location(&dirs.legacy_snapshot_path(), &dirs.snapshot_path());
upgrade_dir_location(&dirs.legacy_network_path(), &dirs.network_path());
upgrade_user_defaults(&dirs);
}