diff --git a/ethcore/res/ethereum/classic.json b/ethcore/res/ethereum/classic.json
index 7c1e9454e..d1ad51790 100644
--- a/ethcore/res/ethereum/classic.json
+++ b/ethcore/res/ethereum/classic.json
@@ -1,6 +1,6 @@
 {
     "name": "Ethereum Classic",
-    "forkName": "classic",
+    "dataDir": "classic",
     "engine": {
         "Ethash": {
             "params": {
diff --git a/ethcore/res/ethereum/expanse.json b/ethcore/res/ethereum/expanse.json
index 8d580b6f5..d8cbd5b0c 100644
--- a/ethcore/res/ethereum/expanse.json
+++ b/ethcore/res/ethereum/expanse.json
@@ -1,6 +1,6 @@
 {
     "name": "Expanse",
-    "forkName": "expanse",
+    "dataDir": "expanse",
     "engine": {
         "Ethash": {
             "params": {
diff --git a/ethcore/res/ethereum/frontier.json b/ethcore/res/ethereum/frontier.json
index 3a9dce456..91a8ae9e6 100644
--- a/ethcore/res/ethereum/frontier.json
+++ b/ethcore/res/ethereum/frontier.json
@@ -1,5 +1,6 @@
 {
     "name": "Frontier/Homestead",
+    "dataDir": "ethereum",
     "engine": {
         "Ethash": {
             "params": {
diff --git a/ethcore/res/ethereum/morden.json b/ethcore/res/ethereum/morden.json
index 6e725e8bf..2a3f798b6 100644
--- a/ethcore/res/ethereum/morden.json
+++ b/ethcore/res/ethereum/morden.json
@@ -1,5 +1,6 @@
 {
     "name": "Morden",
+    "dataDir": "test",
     "engine": {
         "Ethash": {
             "params": {
diff --git a/ethcore/res/ethereum/ropsten.json b/ethcore/res/ethereum/ropsten.json
index 62282801d..d388ce9a1 100644
--- a/ethcore/res/ethereum/ropsten.json
+++ b/ethcore/res/ethereum/ropsten.json
@@ -1,5 +1,6 @@
 {
     "name": "Ropsten",
+    "dataDir": "test",
     "engine": {
         "Ethash": {
             "params": {
diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs
index 36b5e7157..50bb5ef42 100644
--- a/ethcore/src/service.rs
+++ b/ethcore/src/service.rs
@@ -77,9 +77,6 @@ impl ClientService {
         panic_handler.forward_from(&io_service);
         info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name()));
-        if spec.fork_name.is_some() {
-            warn!("Your chain is an alternative fork. {}", Colour::Red.bold().paint("TRANSACTIONS MAY BE REPLAYED ON THE MAINNET!"));
-        }

         let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs
index e14ea3949..c9a7a8918 100644
--- a/ethcore/src/spec/spec.rs
+++ b/ethcore/src/spec/spec.rs
@@ -67,7 +67,7 @@ pub struct Spec {
     /// What engine are we using for this?
     pub engine: Arc<Engine>,
     /// The fork identifier for this chain. Only needed to distinguish two chains sharing the same genesis.
-    pub fork_name: Option<String>,
+    pub data_dir: String,

     /// Known nodes on the network in enode format.
     pub nodes: Vec<String>,
@@ -110,10 +110,10 @@ impl From<ethjson::spec::Spec> for Spec {
         let seal: GenericSeal = g.seal.into();
         let params = CommonParams::from(s.params);
         Spec {
-            name: s.name.into(),
+            name: s.name.clone().into(),
             params: params.clone(),
             engine: Spec::engine(s.engine, params, builtins),
-            fork_name: s.fork_name.map(Into::into),
+            data_dir: s.data_dir.unwrap_or(s.name).into(),
             nodes: s.nodes.unwrap_or_else(Vec::new),
             parent_hash: g.parent_hash,
             transactions_root: g.transactions_root,
diff --git a/json/src/spec/spec.rs b/json/src/spec/spec.rs
index 544407cb8..f192fbfb0 100644
--- a/json/src/spec/spec.rs
+++ b/json/src/spec/spec.rs
@@ -27,8 +27,8 @@ pub struct Spec {
     /// Spec name.
     pub name: String,
     /// Special fork name.
-    #[serde(rename="forkName")]
-    pub fork_name: Option<String>,
+    #[serde(rename="dataDir")]
+    pub data_dir: Option<String>,
     /// Engine.
     pub engine: Engine,
     /// Spec params.
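Note (illustrative, not part of the patch): `dataDir` is optional in the chain JSON, and `Spec::from` above falls back to the spec `name` when it is absent. A minimal sketch of that fallback rule, using a hypothetical `resolve_data_dir` helper:

fn resolve_data_dir(name: &str, data_dir: Option<&str>) -> String {
    // Mirrors `data_dir: s.data_dir.unwrap_or(s.name)` in ethcore/src/spec/spec.rs.
    data_dir.unwrap_or(name).to_owned()
}

fn main() {
    // classic.json sets "dataDir": "classic"; a spec without the field keeps using its name.
    assert_eq!(resolve_data_dir("Ethereum Classic", Some("classic")), "classic");
    assert_eq!(resolve_data_dir("Morden", None), "Morden");
}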
@@ -57,6 +57,7 @@ mod tests {
     fn spec_deserialization() {
         let s = r#"{
         "name": "Morden",
+        "dataDir": "morden",
         "engine": {
             "Ethash": {
                 "params": {
diff --git a/parity/account.rs b/parity/account.rs
index ae7e1f62b..9479d79d6 100644
--- a/parity/account.rs
+++ b/parity/account.rs
@@ -14,23 +14,32 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

+use std::path::PathBuf;
 use ethcore::ethstore::{EthStore, SecretStore, import_accounts, read_geth_accounts};
 use ethcore::ethstore::dir::DiskDirectory;
 use ethcore::account_provider::AccountProvider;
 use helpers::{password_prompt, password_from_file};
+use params::SpecType;

 #[derive(Debug, PartialEq)]
 pub enum AccountCmd {
     New(NewAccount),
-    List(String),
+    List(ListAccounts),
     Import(ImportAccounts),
     ImportFromGeth(ImportFromGethAccounts)
 }

+#[derive(Debug, PartialEq)]
+pub struct ListAccounts {
+    pub path: String,
+    pub spec: SpecType,
+}
+
 #[derive(Debug, PartialEq)]
 pub struct NewAccount {
     pub iterations: u32,
     pub path: String,
+    pub spec: SpecType,
     pub password_file: Option<String>,
 }

@@ -38,6 +47,7 @@ pub struct NewAccount {
 pub struct ImportAccounts {
     pub from: Vec<String>,
     pub to: String,
+    pub spec: SpecType,
 }

 /// Parameters for geth accounts' import
@@ -47,18 +57,22 @@ pub struct ImportFromGethAccounts {
     pub testnet: bool,
     /// directory to import accounts to
     pub to: String,
+    pub spec: SpecType,
 }

 pub fn execute(cmd: AccountCmd) -> Result<String, String> {
     match cmd {
         AccountCmd::New(new_cmd) => new(new_cmd),
-        AccountCmd::List(path) => list(path),
+        AccountCmd::List(list_cmd) => list(list_cmd),
         AccountCmd::Import(import_cmd) => import(import_cmd),
         AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd)
     }
 }

-fn keys_dir(path: String) -> Result<DiskDirectory, String> {
+fn keys_dir(path: String, spec: SpecType) -> Result<DiskDirectory, String> {
+    let spec = try!(spec.spec());
+    let mut path = PathBuf::from(&path);
+    path.push(spec.data_dir);
     DiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e))
 }

@@ -75,15 +89,15 @@ fn new(n: NewAccount) -> Result<String, String> {
         None => try!(password_prompt()),
     };

-    let dir = Box::new(try!(keys_dir(n.path)));
+    let dir = Box::new(try!(keys_dir(n.path, n.spec)));
     let secret_store = Box::new(try!(secret_store(dir, Some(n.iterations))));
     let acc_provider = AccountProvider::new(secret_store);
     let new_account = try!(acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e)));
     Ok(format!("{:?}", new_account))
 }

-fn list(path: String) -> Result<String, String> {
-    let dir = Box::new(try!(keys_dir(path)));
+fn list(list_cmd: ListAccounts) -> Result<String, String> {
+    let dir = Box::new(try!(keys_dir(list_cmd.path, list_cmd.spec)));
     let secret_store = Box::new(try!(secret_store(dir, None)));
     let acc_provider = AccountProvider::new(secret_store);
     let accounts = acc_provider.accounts();
@@ -96,7 +110,7 @@ fn list(path: String) -> Result<String, String> {
 }

 fn import(i: ImportAccounts) -> Result<String, String> {
-    let to = try!(keys_dir(i.to));
+    let to = try!(keys_dir(i.to, i.spec));
     let mut imported = 0;
     for path in &i.from {
         let from = DiskDirectory::at(path);
@@ -109,7 +123,7 @@ fn import_geth(i: ImportFromGethAccounts) -> Result<String, String> {
     use std::io::ErrorKind;
     use ethcore::ethstore::Error;

-    let dir = Box::new(try!(keys_dir(i.to)));
+    let dir = Box::new(try!(keys_dir(i.to, i.spec)));
     let secret_store = Box::new(try!(secret_store(dir, None)));
     let geth_accounts = read_geth_accounts(i.testnet);
     match secret_store.import_geth_accounts(geth_accounts, i.testnet) {
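Note (illustrative, not part of the patch): with the new `spec` argument, `keys_dir` pushes the chain's `data_dir` onto the configured keys path, so each chain gets its own key store. A rough sketch of the resulting path, with example values:

use std::path::PathBuf;

// Mirrors the `path.push(spec.data_dir)` step in `keys_dir` above.
fn chain_keys_path(keys_root: &str, data_dir: &str) -> PathBuf {
    let mut path = PathBuf::from(keys_root);
    path.push(data_dir);
    path
}

fn main() {
    // e.g. `parity account list --chain classic` now looks under .../keys/classic
    let path = chain_keys_path("/home/user/.parity/keys", "classic");
    assert_eq!(path, PathBuf::from("/home/user/.parity/keys/classic"));
}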
diff --git a/parity/blockchain.rs b/parity/blockchain.rs
index a9d81e5c3..cc80f9d13 100644
--- a/parity/blockchain.rs
+++ b/parity/blockchain.rs
@@ -150,7 +150,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result {
     let genesis_hash = spec.genesis_header().hash();

     // database paths
-    let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+    let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());

     // user defaults path
     let user_defaults_path = db_dirs.user_defaults_path();
@@ -174,7 +174,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result {
     let snapshot_path = db_dirs.snapshot_path();

     // execute upgrades
-    try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
+    try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path())));

     // prepare client config
     let mut client_config = to_client_config(
@@ -321,7 +321,7 @@ fn start_client(
     let genesis_hash = spec.genesis_header().hash();

     // database paths
-    let db_dirs = dirs.database(genesis_hash, spec.fork_name.clone());
+    let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone());

     // user defaults path
     let user_defaults_path = db_dirs.user_defaults_path();
@@ -345,7 +345,7 @@ fn start_client(
     let snapshot_path = db_dirs.snapshot_path();

     // execute upgrades
-    try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.fork_path().as_path())));
+    try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.db_root_path().as_path())));

     // prepare client config
     let client_config = to_client_config(&cache_config, Mode::Active, tracing, fat_db, compaction, wal, VMType::default(), "".into(), algorithm, pruning_history, true);
diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs
index 707f4159d..9f9ed0119 100644
--- a/parity/cli/mod.rs
+++ b/parity/cli/mod.rs
@@ -81,7 +81,7 @@ usage! {
         flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(),
         flag_chain: String = "homestead", or |c: &Config| otry!(c.parity).chain.clone(),
         flag_db_path: String = "$HOME/.parity", or |c: &Config| otry!(c.parity).db_path.clone(),
-        flag_keys_path: String = "$HOME/.parity/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
+        flag_keys_path: String = "$DATA/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
         flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(),

         // -- Account Options
@@ -100,7 +100,7 @@ usage! {
             or |c: &Config| otry!(c.ui).port.clone(),
         flag_ui_interface: String = "local",
             or |c: &Config| otry!(c.ui).interface.clone(),
-        flag_ui_path: String = "$HOME/.parity/signer",
+        flag_ui_path: String = "$DATA/signer",
             or |c: &Config| otry!(c.ui).path.clone(),
         // NOTE [todr] For security reasons don't put this to config files
         flag_ui_no_validation: bool = false, or |_| None,
@@ -156,7 +156,7 @@ usage! {
         // IPC
         flag_no_ipc: bool = false,
             or |c: &Config| otry!(c.ipc).disable.clone(),
-        flag_ipc_path: String = "$HOME/.parity/jsonrpc.ipc",
+        flag_ipc_path: String = "$DATA/jsonrpc.ipc",
             or |c: &Config| otry!(c.ipc).path.clone(),
         flag_ipc_apis: String = "web3,eth,net,parity,parity_accounts,traces,rpc",
             or |c: &Config| otry!(c.ipc).apis.clone().map(|vec| vec.join(",")),
@@ -170,7 +170,7 @@ usage! {
             or |c: &Config| otry!(c.dapps).interface.clone(),
         flag_dapps_hosts: String = "none",
             or |c: &Config| otry!(c.dapps).hosts.clone().map(|vec| vec.join(",")),
-        flag_dapps_path: String = "$HOME/.parity/dapps",
+        flag_dapps_path: String = "$DATA/dapps",
             or |c: &Config| otry!(c.dapps).path.clone(),
         flag_dapps_user: Option<String> = None,
             or |c: &Config| otry!(c.dapps).user.clone().map(Some),
@@ -271,7 +271,7 @@ usage! {
             or |c: &Config| otry!(c.vm).jit.clone(),

         // -- Miscellaneous Options
-        flag_config: String = "$HOME/.parity/config.toml", or |_| None,
+        flag_config: String = "$DATA/config.toml", or |_| None,
         flag_logging: Option<String> = None,
             or |c: &Config| otry!(c.misc).logging.clone().map(Some),
         flag_log_file: Option<String> = None,
diff --git a/parity/cli/usage.rs b/parity/cli/usage.rs
index 6dcbd6453..0d929d9cf 100644
--- a/parity/cli/usage.rs
+++ b/parity/cli/usage.rs
@@ -145,7 +145,7 @@ macro_rules! usage {
                 }

                 let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config);
-                let config_file = replace_home(&config_file);
+                let config_file = replace_home("", &config_file);

                 let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) {
                     // Load config file
                     (Ok(mut file), _) => {
diff --git a/parity/configuration.rs b/parity/configuration.rs
index 84287719e..164384e7e 100644
--- a/parity/configuration.rs
+++ b/parity/configuration.rs
@@ -40,7 +40,7 @@ use signer::{Configuration as SignerConfiguration};
 use run::RunCmd;
 use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, ExportState, DataFormat};
 use presale::ImportWallet;
-use account::{AccountCmd, NewAccount, ImportAccounts, ImportFromGethAccounts};
+use account::{AccountCmd, NewAccount, ListAccounts, ImportAccounts, ImportFromGethAccounts};
 use snapshot::{self, SnapshotCommand};

 #[derive(Debug, PartialEq)]
@@ -112,15 +112,21 @@ impl Configuration {
             let new_acc = NewAccount {
                 iterations: self.args.flag_keys_iterations,
                 path: dirs.keys,
+                spec: spec,
                 password_file: self.args.flag_password.first().cloned(),
             };
             AccountCmd::New(new_acc)
         } else if self.args.cmd_list {
-            AccountCmd::List(dirs.keys)
+            let list_acc = ListAccounts {
+                path: dirs.keys,
+                spec: spec,
+            };
+            AccountCmd::List(list_acc)
         } else if self.args.cmd_import {
             let import_acc = ImportAccounts {
                 from: self.args.arg_path.clone(),
                 to: dirs.keys,
+                spec: spec,
             };
             AccountCmd::Import(import_acc)
         } else {
@@ -130,6 +136,7 @@ impl Configuration {
         } else if self.args.flag_import_geth_keys {
             let account_cmd = AccountCmd::ImportFromGeth(
                 ImportFromGethAccounts {
+                    spec: spec,
                     to: dirs.keys,
                     testnet: self.args.flag_testnet
                 }
@@ -139,6 +146,7 @@ impl Configuration {
             let presale_cmd = ImportWallet {
                 iterations: self.args.flag_keys_iterations,
                 path: dirs.keys,
+                spec: spec,
                 wallet_path: self.args.arg_path.first().unwrap().clone(),
                 password_file: self.args.flag_password.first().cloned(),
             };
@@ -530,7 +538,7 @@ impl Configuration {
         ret.snapshot_peers = self.snapshot_peers();
         ret.allow_ips = try!(self.allow_ips());
         ret.max_pending_peers = self.max_pending_peers();
-        let mut net_path = PathBuf::from(self.directories().db);
+        let mut net_path = PathBuf::from(self.directories().data);
         net_path.push("network");
         ret.config_path = Some(net_path.to_str().unwrap().to_owned());
         ret.reserved_nodes = try!(self.init_reserved_nodes());
@@ -624,18 +632,11 @@ impl Configuration {
     fn directories(&self) -> Directories {
         use util::path;

-        let db_path = replace_home(self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));
+        let data_path = replace_home("", self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));

-        let keys_path = replace_home(
-            if self.args.flag_testnet {
-                "$HOME/.parity/testnet_keys"
-            } else {
-                &self.args.flag_keys_path
-            }
-        );
-
-        let dapps_path = replace_home(&self.args.flag_dapps_path);
-        let ui_path = replace_home(&self.args.flag_ui_path);
+        let keys_path = replace_home(&data_path, &self.args.flag_keys_path);
+        let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path);
+        let ui_path = replace_home(&data_path, &self.args.flag_ui_path);

         if self.args.flag_geth && !cfg!(windows) {
             let geth_root = if self.args.flag_testnet { path::ethereum::test() } else { path::ethereum::default() };
@@ -644,7 +645,7 @@ impl Configuration {
         }

         if cfg!(feature = "ipc") && !cfg!(feature = "windows") {
-            let mut path_buf = PathBuf::from(db_path.clone());
+            let mut path_buf = PathBuf::from(data_path.clone());
             path_buf.push("ipc");
             let ipc_path = path_buf.to_str().unwrap();
             ::std::fs::create_dir_all(ipc_path).unwrap_or_else(
@@ -654,7 +655,7 @@ impl Configuration {

         Directories {
             keys: keys_path,
-            db: db_path,
+            data: data_path,
             dapps: dapps_path,
             signer: ui_path,
         }
@@ -664,7 +665,7 @@ impl Configuration {
         if self.args.flag_geth {
             geth_ipc_path(self.args.flag_testnet)
         } else {
-            parity_ipc_path(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()))
+            parity_ipc_path(&self.directories().data, &self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()))
         }
     }

@@ -764,7 +765,7 @@ mod tests {
         let conf = parse(&args);
         assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::New(NewAccount {
             iterations: 10240,
-            path: replace_home("$HOME/.parity/keys"),
+            path: replace_home("", "$HOME/.parity/keys"),
             password_file: None,
         })));
     }
@@ -774,7 +775,7 @@ mod tests {
         let args = vec!["parity", "account", "list"];
         let conf = parse(&args);
         assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(
-            AccountCmd::List(replace_home("$HOME/.parity/keys")),
+            AccountCmd::List(replace_home("", "$HOME/.parity/keys")),
         ));
     }
@@ -784,7 +785,7 @@ mod tests {
         let conf = parse(&args);
         assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::Import(ImportAccounts {
             from: vec!["my_dir".into(), "another_dir".into()],
-            to: replace_home("$HOME/.parity/keys"),
+            to: replace_home("", "$HOME/.parity/keys"),
         })));
     }
@@ -794,7 +795,7 @@ mod tests {
         let conf = parse(&args);
         assert_eq!(conf.into_command().unwrap().cmd, Cmd::ImportPresaleWallet(ImportWallet {
             iterations: 10240,
-            path: replace_home("$HOME/.parity/keys"),
+            path: replace_home("", "$HOME/.parity/keys"),
             wallet_path: "my_wallet.json".into(),
             password_file: Some("pwd".into()),
         }));
@@ -895,7 +896,7 @@ mod tests {
     fn test_command_signer_new_token() {
         let args = vec!["parity", "signer", "new-token"];
         let conf = parse(&args);
-        let expected = replace_home("$HOME/.parity/signer");
+        let expected = replace_home("", "$HOME/.parity/signer");
         assert_eq!(conf.into_command().unwrap().cmd, Cmd::SignerToken(SignerConfiguration {
             enabled: true,
             signer_path: expected,
diff --git a/parity/dapps.rs b/parity/dapps.rs
index ec6fd8846..b064a2efb 100644
--- a/parity/dapps.rs
+++ b/parity/dapps.rs
@@ -41,7 +41,7 @@ impl Default for Configuration {
             hosts: Some(Vec::new()),
             user: None,
             pass: None,
-            dapps_path: replace_home("$HOME/.parity/dapps"),
+            dapps_path: replace_home("", "$HOME/.parity/dapps"),
         }
     }
 }
diff --git a/parity/dir.rs b/parity/dir.rs
index b9c02efd6..c61af3a8c 100644
--- a/parity/dir.rs
+++ b/parity/dir.rs
@@ -26,7 +26,7 @@ const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";

 #[derive(Debug, PartialEq)]
 pub struct Directories {
-    pub db: String,
+    pub data: String,
     pub keys: String,
     pub signer: String,
     pub dapps: String,
@@ -35,17 +35,17 @@ pub struct Directories {
 impl Default for Directories {
     fn default() -> Self {
         Directories {
-            db: replace_home("$HOME/.parity"),
-            keys: replace_home("$HOME/.parity/keys"),
-            signer: replace_home("$HOME/.parity/signer"),
-            dapps: replace_home("$HOME/.parity/dapps"),
+            data: replace_home("", "$HOME/.parity"),
+            keys: replace_home("", "$HOME/.parity/keys"),
+            signer: replace_home("", "$HOME/.parity/signer"),
+            dapps: replace_home("", "$HOME/.parity/dapps"),
         }
     }
 }

 impl Directories {
     pub fn create_dirs(&self, dapps_enabled: bool, signer_enabled: bool) -> Result<(), String> {
-        try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string()));
+        try!(fs::create_dir_all(&self.data).map_err(|e| e.to_string()));
         try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string()));
         if signer_enabled {
             try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string()));
@@ -57,20 +57,38 @@ impl Directories {
     }

     /// Database paths.
-    pub fn database(&self, genesis_hash: H256, fork_name: Option<String>) -> DatabaseDirectories {
+    pub fn database(&self, genesis_hash: H256, fork_name: Option<String>, spec_name: String) -> DatabaseDirectories {
         DatabaseDirectories {
-            path: self.db.clone(),
+            path: self.data.clone(),
             genesis_hash: genesis_hash,
             fork_name: fork_name,
+            spec_name: spec_name,
         }
     }

     /// Get the ipc sockets path
     pub fn ipc_path(&self) -> PathBuf {
-        let mut dir = Path::new(&self.db).to_path_buf();
+        let mut dir = Path::new(&self.data).to_path_buf();
         dir.push("ipc");
         dir
     }
+
+    // TODO: remove in 1.7
+    pub fn legacy_keys_path(&self, testnet: bool) -> PathBuf {
+        let mut dir = Path::new(&self.data).to_path_buf();
+        if testnet {
+            dir.push("testnet_keys");
+        } else {
+            dir.push("keys");
+        }
+        dir
+    }
+
+    pub fn keys_path(&self, spec_name: &str) -> PathBuf {
+        let mut dir = PathBuf::from(&self.keys);
+        dir.push(spec_name);
+        dir
+    }
 }

 #[derive(Debug, PartialEq)]
@@ -78,47 +96,93 @@ pub struct DatabaseDirectories {
     pub path: String,
     pub genesis_hash: H256,
     pub fork_name: Option<String>,
+    pub spec_name: String,
 }

 impl DatabaseDirectories {
     /// Base DB directory for the given fork.
-    pub fn fork_path(&self) -> PathBuf {
+    // TODO: remove in 1.7
+    pub fn legacy_fork_path(&self) -> PathBuf {
         let mut dir = Path::new(&self.path).to_path_buf();
         dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
         dir
     }

-    /// Get the root path for database
-    pub fn version_path(&self, pruning: Algorithm) -> PathBuf {
-        let mut dir = self.fork_path();
-        dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
+    pub fn spec_root_path(&self) -> PathBuf {
+        let mut dir = Path::new(&self.path).to_path_buf();
+        dir.push("chains");
+        dir.push(&self.spec_name);
         dir
     }

-    /// Get the path for the databases given the genesis_hash and information on the databases.
     pub fn client_path(&self, pruning: Algorithm) -> PathBuf {
-        let mut dir = self.version_path(pruning);
+        let mut dir = self.db_root_path();
+        dir.push(pruning.as_internal_name_str());
         dir.push("db");
         dir
     }

+    pub fn db_root_path(&self) -> PathBuf {
+        let mut dir = self.spec_root_path();
+        dir.push("db");
+        dir.push(H64::from(self.genesis_hash).hex());
+        dir
+    }
+
+    pub fn db_path(&self, pruning: Algorithm) -> PathBuf {
+        let mut dir = self.db_root_path();
+        dir.push(pruning.as_internal_name_str());
+        dir
+    }
+
+    /// Get the root path for database
+    // TODO: remove in 1.7
+    pub fn legacy_version_path(&self, pruning: Algorithm) -> PathBuf {
+        let mut dir = self.legacy_fork_path();
+        dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
+        dir
+    }
+
+    /// Get user defaults path
+    // TODO: remove in 1.7
+    pub fn legacy_user_defaults_path(&self) -> PathBuf {
+        let mut dir = self.legacy_fork_path();
+        dir.push("user_defaults");
+        dir
+    }
+
+    /// Get the legacy snapshot path
+    // TODO: remove in 1.7
+    pub fn legacy_snapshot_path(&self) -> PathBuf {
+        let mut dir = self.legacy_fork_path();
+        dir.push("snapshot");
+        dir
+    }
+
+    /// Get the legacy network path
+    // TODO: remove in 1.7
+    pub fn legacy_network_path(&self) -> PathBuf {
+        let mut dir = self.legacy_fork_path();
+        dir.push("network");
+        dir
+    }
+
     pub fn user_defaults_path(&self) -> PathBuf {
-        let mut dir = self.fork_path();
+        let mut dir = self.spec_root_path();
         dir.push("user_defaults");
         dir
     }

     /// Get the path for the snapshot directory given the genesis hash and fork name.
     pub fn snapshot_path(&self) -> PathBuf {
-        let mut dir = self.fork_path();
+        let mut dir = self.db_root_path();
         dir.push("snapshot");
         dir
     }

     /// Get the path for the network directory.
     pub fn network_path(&self) -> PathBuf {
-        let mut dir = self.fork_path();
+        let mut dir = self.spec_root_path();
         dir.push("network");
         dir
     }
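Note (illustrative, not part of the patch): the helpers added to `DatabaseDirectories` above compose into a per-chain layout under `<data>/chains/<spec_name>/`, with databases further keyed by a genesis-hash prefix and the pruning algorithm. A sketch of how the paths nest, assuming the example values `spec_name = "ethereum"`, hash prefix `"d4e56740f876aef8"` and pruning `"archive"`:

use std::path::PathBuf;

fn main() {
    let data = PathBuf::from("/home/user/.parity");

    let spec_root = data.join("chains").join("ethereum");         // spec_root_path()
    let db_root = spec_root.join("db").join("d4e56740f876aef8");  // db_root_path()
    let db = db_root.join("archive");                             // db_path(pruning)

    println!("client db:     {}", db.join("db").display());                   // client_path(pruning)
    println!("snapshot:      {}", db_root.join("snapshot").display());        // snapshot_path()
    println!("user defaults: {}", spec_root.join("user_defaults").display()); // user_defaults_path()
    println!("network:       {}", spec_root.join("network").display());       // network_path()
}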
diff --git a/parity/helpers.rs b/parity/helpers.rs
index 60a04bc45..e2260f405 100644
--- a/parity/helpers.rs
+++ b/parity/helpers.rs
@@ -24,7 +24,7 @@ use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientCo
 use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
 use cache::CacheConfig;
 use dir::DatabaseDirectories;
-use upgrade::upgrade;
+use upgrade::{upgrade, upgrade_data_paths};
 use migration::migrate;
 use ethsync::is_valid_node_url;
@@ -132,9 +132,10 @@ pub fn to_price(s: &str) -> Result {
 }

 /// Replaces `$HOME` str with home directory path.
-pub fn replace_home(arg: &str) -> String {
+pub fn replace_home(base: &str, arg: &str) -> String {
     // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support`
     let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap());
+    let r = r.replace("$DATA", base );
     r.replace("/", &::std::path::MAIN_SEPARATOR.to_string() )
 }
@@ -159,13 +160,13 @@ pub fn geth_ipc_path(testnet: bool) -> String {
 }

 /// Formats and returns parity ipc path.
-pub fn parity_ipc_path(s: &str) -> String {
+pub fn parity_ipc_path(base: &str, s: &str) -> String {
     // Windows path should not be hardcoded here.
     if cfg!(windows) {
         return r"\\.\pipe\parity.jsonrpc".to_owned();
     }

-    replace_home(s)
+    replace_home(base, s)
 }

 /// Validates and formats bootnodes option.
@@ -187,7 +188,7 @@ pub fn to_bootnodes(bootnodes: &Option<String>) -> Result<Vec<String>, String> {
 pub fn default_network_config() -> ::ethsync::NetworkConfiguration {
     use ethsync::{NetworkConfiguration, AllowIP};
     NetworkConfiguration {
-        config_path: Some(replace_home("$HOME/.parity/network")),
+        config_path: Some(replace_home("", "$HOME/.parity/network")),
         net_config_path: None,
         listen_address: Some("0.0.0.0:30303".into()),
         public_address: None,
@@ -261,6 +262,8 @@ pub fn execute_upgrades(
     compaction_profile: CompactionProfile
 ) -> Result<(), String> {
+    upgrade_data_paths(dirs, pruning);
+
     match upgrade(Some(&dirs.path)) {
         Ok(upgrades_applied) if upgrades_applied > 0 => {
             debug!("Executed {} upgrade scripts - ok", upgrades_applied);
@@ -271,7 +274,7 @@ pub fn execute_upgrades(
         _ => {},
     }

-    let client_path = dirs.version_path(pruning);
+    let client_path = dirs.db_path(pruning);
     migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
 }
diff --git a/parity/params.rs b/parity/params.rs
index 25ddc8814..ff87039e5 100644
--- a/parity/params.rs
+++ b/parity/params.rs
@@ -76,6 +76,14 @@ impl SpecType {
             }
         }
     }
+
+    pub fn legacy_fork_name(&self) -> Option<String> {
+        match *self {
+            SpecType::Classic => Some("classic".to_owned()),
+            SpecType::Expanse => Some("expanse".to_owned()),
+            _ => None,
+        }
+    }
 }

 #[derive(Debug, PartialEq)]
diff --git a/parity/presale.rs b/parity/presale.rs
index 098832428..c974923dd 100644
--- a/parity/presale.rs
+++ b/parity/presale.rs
@@ -18,11 +18,13 @@ use ethcore::ethstore::{PresaleWallet, EthStore};
 use ethcore::ethstore::dir::DiskDirectory;
 use ethcore::account_provider::AccountProvider;
 use helpers::{password_prompt, password_from_file};
+use params::SpecType;

 #[derive(Debug, PartialEq)]
 pub struct ImportWallet {
     pub iterations: u32,
     pub path: String,
+    pub spec: SpecType,
     pub wallet_path: String,
     pub password_file: Option<String>,
 }
diff --git a/parity/rpc.rs b/parity/rpc.rs
index 52a5bcc0f..18487b427 100644
--- a/parity/rpc.rs
+++ b/parity/rpc.rs
@@ -60,7 +60,7 @@ impl Default for IpcConfiguration {
     fn default() -> Self {
         IpcConfiguration {
             enabled: true,
-            socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"),
+            socket_addr: parity_ipc_path("", "$HOME/.parity/jsonrpc.ipc"),
             apis: ApiSet::IpcContext,
         }
     }
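Note (illustrative, not part of the patch): the `$HOME/...` defaults above become `$DATA/...` placeholders, which `replace_home` now expands against the resolved data directory while `$HOME` still expands to the user's home. A simplified sketch of that substitution, ignoring the platform separator rewrite done by the real helper:

// Simplified stand-in for the new `replace_home(base, arg)` in parity/helpers.rs.
fn replace_home_sketch(home: &str, base: &str, arg: &str) -> String {
    arg.replace("$HOME", home).replace("$DATA", base)
}

fn main() {
    let home = "/home/user";
    // The data dir itself is resolved with an empty base, since it never uses $DATA.
    let data = replace_home_sketch(home, "", "$HOME/.parity");
    // Defaults such as "$DATA/jsonrpc.ipc" then resolve relative to that data dir.
    let ipc = replace_home_sketch(home, &data, "$DATA/jsonrpc.ipc");
    assert_eq!(ipc, "/home/user/.parity/jsonrpc.ipc");
}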
diff --git a/parity/run.rs b/parity/run.rs
index 8128b3a90..c78484f87 100644
--- a/parity/run.rs
+++ b/parity/run.rs
@@ -41,6 +41,7 @@ use params::{
     tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
 };
 use helpers::{to_client_config, execute_upgrades, passwords_from_files};
+use upgrade::upgrade_key_location;
 use dir::Directories;
 use cache::CacheConfig;
 use user_defaults::UserDefaults;
@@ -139,7 +140,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> {
     let genesis_hash = spec.genesis_header().hash();

     // database paths
-    let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+    let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone());

     // user defaults path
     let user_defaults_path = db_dirs.user_defaults_path();
@@ -166,7 +167,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> {
     let snapshot_path = db_dirs.snapshot_path();

     // execute upgrades
-    try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
+    try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path())));

     // run in daemon mode
     if let Some(pid_file) = cmd.daemon {
@@ -217,7 +218,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> {
     let passwords = try!(passwords_from_files(&cmd.acc_conf.password_files));

     // prepare account provider
-    let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf, &passwords)));
+    let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)));

     // let the Engine access the accounts
     spec.engine.register_account_provider(account_provider.clone());
@@ -449,11 +450,13 @@ fn daemonize(_pid_file: String) -> Result<(), String> {
     Err("daemon is no supported on windows".into())
 }

-fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig, passwords: &[String]) -> Result<AccountProvider, String> {
+fn prepare_account_provider(dirs: &Directories, data_dir: &str, cfg: AccountsConfig, passwords: &[String]) -> Result<AccountProvider, String> {
     use ethcore::ethstore::EthStore;
     use ethcore::ethstore::dir::DiskDirectory;

-    let dir = Box::new(try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e))));
+    let path = dirs.keys_path(data_dir);
+    upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path);
+    let dir = Box::new(try!(DiskDirectory::create(&path).map_err(|e| format!("Could not open keys directory: {}", e))));
     let account_service = AccountProvider::new(Box::new(
         try!(EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e)))
     ));
diff --git a/parity/signer.rs b/parity/signer.rs
index 6905fbb3c..8f7a3d646 100644
--- a/parity/signer.rs
+++ b/parity/signer.rs
@@ -42,7 +42,7 @@ impl Default for Configuration {
             enabled: true,
             port: 8180,
             interface: "127.0.0.1".into(),
-            signer_path: replace_home("$HOME/.parity/signer"),
+            signer_path: replace_home("", "$HOME/.parity/signer"),
             skip_origin_validation: false,
         }
     }
diff --git a/parity/snapshot.rs b/parity/snapshot.rs
index 804047596..9493d53e9 100644
--- a/parity/snapshot.rs
+++ b/parity/snapshot.rs
@@ -143,7 +143,7 @@ impl SnapshotCommand {
     let genesis_hash = spec.genesis_header().hash();

     // database paths
-    let db_dirs = self.dirs.database(genesis_hash, spec.fork_name.clone());
+    let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());

     // user defaults path
     let user_defaults_path = db_dirs.user_defaults_path();
@@ -167,7 +167,7 @@ impl SnapshotCommand {
     let snapshot_path = db_dirs.snapshot_path();

     // execute upgrades
-    try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.fork_path().as_path())));
+    try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.db_root_path().as_path())));

     // prepare client config
     let client_config = to_client_config(&self.cache_config, Mode::Active, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm, self.pruning_history, true);
diff --git a/parity/upgrade.rs b/parity/upgrade.rs
index 7e5b73622..273d98d1c 100644
--- a/parity/upgrade.rs
+++ b/parity/upgrade.rs
@@ -18,10 +18,13 @@
 use semver::Version;
 use std::collections::*;
-use std::fs::{File, create_dir_all};
+use std::fs::{self, File, create_dir_all};
 use std::env;
+use std::io;
 use std::io::{Read, Write};
-use std::path::PathBuf;
+use std::path::{PathBuf, Path};
+use dir::DatabaseDirectories;
+use util::journaldb::Algorithm;

 #[cfg_attr(feature="dev", allow(enum_variant_names))]
 #[derive(Debug)]
@@ -126,3 +129,76 @@ pub fn upgrade(db_path: Option<&str>) -> Result {
         upgrade_from_version(ver)
     })
 }
+
+
+fn file_exists(path: &Path) -> bool {
+    match fs::metadata(&path) {
+        Err(ref e) if e.kind() == io::ErrorKind::NotFound => false,
+        _ => true,
+    }
+}
+
+pub fn upgrade_key_location(from: &PathBuf, to: &PathBuf) {
+    match fs::create_dir_all(to).and_then(|()| fs::read_dir(from)) {
+        Ok(entries) => {
+            let files: Vec<_> = entries.filter_map(|f| f.ok().and_then(|f| if f.file_type().ok().map_or(false, |f| f.is_file()) { f.file_name().to_str().map(|s| s.to_owned()) } else { None })).collect();
+            let mut num: usize = 0;
+            for name in files {
+                let mut from = from.clone();
+                from.push(&name);
+                let mut to = to.clone();
+                to.push(&name);
+                if !file_exists(&to) {
+                    if let Err(e) = fs::rename(&from, &to) {
+                        debug!("Error upgrading key {:?}: {:?}", from, e);
+                    } else {
+                        num += 1;
+                    }
+                } else {
+                    debug!("Skipped upgrading key {:?}", from);
+                }
+            }
+            if num > 0 {
+                info!("Moved {} keys from {} to {}", num, from.to_string_lossy(), to.to_string_lossy());
+            }
+        },
+        Err(e) => {
+            warn!("Error moving keys from {:?} to {:?}: {:?}", from, to, e);
+        }
+    }
+}
+
+fn upgrade_dir_location(source: &PathBuf, dest: &PathBuf) {
+    if file_exists(&source) {
+        if !file_exists(&dest) {
+            if let Err(e) = fs::create_dir_all(&dest).and_then(|()| fs::rename(&source, &dest)) {
+                debug!("Skipped path {:?}:{:?}", dest, e);
+            } else {
+                info!("Moved {} to {}", source.to_string_lossy(), dest.to_string_lossy());
+            }
+        } else {
+            debug!("Skipped upgrading directory {:?}, Destination already exists at {:?}", source, dest);
+        }
+    }
+}
+
+fn upgrade_user_defaults(dirs: &DatabaseDirectories) {
+    let source = dirs.legacy_user_defaults_path();
+    let dest = dirs.user_defaults_path();
+    if file_exists(&source) {
+        if !file_exists(&dest) {
+            if let Err(e) = fs::rename(&source, &dest) {
+                debug!("Skipped upgrading user defaults {:?}:{:?}", dest, e);
+            }
+        } else {
+            debug!("Skipped upgrading user defaults {:?}, File exists at {:?}", source, dest);
+        }
+    }
+}
+
+pub fn upgrade_data_paths(dirs: &DatabaseDirectories, pruning: Algorithm) {
+    upgrade_dir_location(&dirs.legacy_version_path(pruning), &dirs.db_path(pruning));
+    upgrade_dir_location(&dirs.legacy_snapshot_path(), &dirs.snapshot_path());
+    upgrade_dir_location(&dirs.legacy_network_path(), &dirs.network_path());
+    upgrade_user_defaults(&dirs);
+}
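Note (illustrative, not part of the patch): `upgrade_data_paths` runs from `execute_upgrades` before the database migration, so an existing install is moved from the legacy genesis-hash directories into the new per-chain layout on first start, and keys are moved by `upgrade_key_location` when the account provider is prepared. A sketch of the move-if-missing rule both helpers follow, with hypothetical example paths:

use std::fs;
use std::io;
use std::path::Path;

// Simplified version of the rule in `upgrade_dir_location` above: only move a legacy
// directory when the new location does not exist yet, so the upgrade is a one-time step
// and a no-op on fresh installs or repeated runs.
fn move_if_missing(source: &Path, dest: &Path) -> io::Result<()> {
    if source.exists() && !dest.exists() {
        if let Some(parent) = dest.parent() {
            fs::create_dir_all(parent)?;
        }
        fs::rename(source, dest)?;
    }
    Ok(())
}

fn main() {
    // e.g. "<data>/<genesis prefix>/v5.3-sec-archive" -> "<data>/chains/<spec>/db/<genesis prefix>/archive"
    let _ = move_if_missing(
        Path::new("/tmp/parity-data/0123456789abcdef/v5.3-sec-archive"),
        Path::new("/tmp/parity-data/chains/ethereum/db/0123456789abcdef/archive"),
    );
}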