2017-01-25 18:51:41 +01:00
|
|
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
2016-06-20 10:06:49 +02:00
|
|
|
// This file is part of Parity.
|
|
|
|
|
|
|
|
// Parity is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
|
|
|
// Parity is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
|
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2016-07-25 16:09:47 +02:00
|
|
|
use std::{fs, io};
|
2018-02-12 18:03:37 +01:00
|
|
|
use std::io::Write;
|
2016-06-20 00:10:34 +02:00
|
|
|
use std::path::{PathBuf, Path};
|
|
|
|
use std::collections::HashMap;
|
2016-07-25 10:45:45 +02:00
|
|
|
use time;
|
2016-07-25 16:09:47 +02:00
|
|
|
use {json, SafeAccount, Error};
|
2016-12-09 23:01:43 +01:00
|
|
|
use json::Uuid;
|
2017-01-30 11:44:09 +01:00
|
|
|
use super::{KeyDirectory, VaultKeyDirectory, VaultKeyDirectoryProvider, VaultKey};
|
2017-02-05 16:17:56 +01:00
|
|
|
use super::vault::{VAULT_FILE_NAME, VaultDiskDirectory};
|
2016-06-20 00:10:34 +02:00
|
|
|
|
2017-01-30 10:59:46 +01:00
|
|
|
// Filenames living in the keys directory that are NOT key files and must be
// skipped when enumerating accounts (metadata kept alongside the keys, plus
// OS cruft such as Windows thumbnail caches).
const IGNORED_FILES: &'static [&'static str] = &[
	"thumbs.db",
	"address_book.json",
	"dapps_policy.json",
	"dapps_accounts.json",
	"dapps_history.json",
	"vault.json",
];
|
2016-08-10 16:42:15 +02:00
|
|
|
|
2016-06-20 00:10:34 +02:00
|
|
|
#[cfg(not(windows))]
|
|
|
|
fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
|
2016-07-25 16:09:47 +02:00
|
|
|
use std::ffi;
|
|
|
|
use libc;
|
2016-10-25 22:34:52 +02:00
|
|
|
|
2016-12-27 12:53:56 +01:00
|
|
|
let cstr = ffi::CString::new(&*file_path.to_string_lossy())
|
|
|
|
.map_err(|_| -1)?;
|
2016-06-20 00:10:34 +02:00
|
|
|
match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } {
|
|
|
|
0 => Ok(()),
|
|
|
|
x => Err(x),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// No-op on Windows: POSIX permission bits do not apply there, so the key
// file is left with whatever ACLs it was created with.
#[cfg(windows)]
fn restrict_permissions_to_owner(_file_path: &Path) -> Result<(), i32> {
	Ok(())
}
|
|
|
|
|
2017-01-30 11:44:09 +01:00
|
|
|
/// Root keys directory implementation.
///
/// A `DiskDirectory` that stores key files directly in a single on-disk
/// directory, serialized via `DiskKeyFileManager`.
pub type RootDiskDirectory = DiskDirectory<DiskKeyFileManager>;
|
|
|
|
|
|
|
|
/// Disk directory key file manager.
///
/// Abstracts how a `SafeAccount` is serialized to / deserialized from the
/// key file streams handled by a `DiskDirectory`.
pub trait KeyFileManager: Send + Sync {
	/// Read `SafeAccount` from given key file stream.
	/// `filename` (if known) is attached to the resulting account.
	fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error> where T: io::Read;

	/// Write `SafeAccount` to given key file stream.
	fn write<T>(&self, account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write;
}
|
|
|
|
|
|
|
|
/// Disk-based keys directory implementation.
pub struct DiskDirectory<T> where T: KeyFileManager {
	// Filesystem path of the directory holding the key files.
	path: PathBuf,
	// Strategy used to read/write the individual key files.
	key_manager: T,
}
|
|
|
|
|
2017-01-30 11:44:09 +01:00
|
|
|
/// Keys file manager for root keys directory.
///
/// Stateless unit struct; see its `KeyFileManager` impl for the JSON
/// (de)serialization logic.
pub struct DiskKeyFileManager;
|
|
|
|
|
|
|
|
impl RootDiskDirectory {
|
2016-06-20 00:10:34 +02:00
|
|
|
pub fn create<P>(path: P) -> Result<Self, Error> where P: AsRef<Path> {
|
2016-12-27 12:53:56 +01:00
|
|
|
fs::create_dir_all(&path)?;
|
2016-06-20 00:10:34 +02:00
|
|
|
Ok(Self::at(path))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn at<P>(path: P) -> Self where P: AsRef<Path> {
|
2017-01-30 11:44:09 +01:00
|
|
|
DiskDirectory::new(path, DiskKeyFileManager)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl<T> DiskDirectory<T> where T: KeyFileManager {
|
|
|
|
/// Create new disk directory instance
|
|
|
|
pub fn new<P>(path: P, key_manager: T) -> Self where P: AsRef<Path> {
|
2016-06-20 00:10:34 +02:00
|
|
|
DiskDirectory {
|
|
|
|
path: path.as_ref().to_path_buf(),
|
2017-01-30 11:44:09 +01:00
|
|
|
key_manager: key_manager,
|
2016-06-20 00:10:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-16 16:20:24 +01:00
|
|
|
fn files(&self) -> Result<Vec<PathBuf>, Error> {
|
|
|
|
Ok(fs::read_dir(&self.path)?
|
2016-06-20 00:10:34 +02:00
|
|
|
.flat_map(Result::ok)
|
|
|
|
.filter(|entry| {
|
2016-10-25 22:34:52 +02:00
|
|
|
let metadata = entry.metadata().ok();
|
2016-08-10 16:42:15 +02:00
|
|
|
let file_name = entry.file_name();
|
2016-10-25 22:34:52 +02:00
|
|
|
let name = file_name.to_string_lossy();
|
2016-08-10 16:42:15 +02:00
|
|
|
// filter directories
|
2016-10-25 22:34:52 +02:00
|
|
|
metadata.map_or(false, |m| !m.is_dir()) &&
|
2017-02-16 16:20:24 +01:00
|
|
|
// hidden files
|
|
|
|
!name.starts_with(".") &&
|
|
|
|
// other ignored files
|
|
|
|
!IGNORED_FILES.contains(&&*name)
|
2016-06-20 00:10:34 +02:00
|
|
|
})
|
|
|
|
.map(|entry| entry.path())
|
2017-02-16 16:20:24 +01:00
|
|
|
.collect::<Vec<PathBuf>>()
|
|
|
|
)
|
|
|
|
}
|
2016-06-20 00:10:34 +02:00
|
|
|
|
2017-02-16 20:53:58 +01:00
|
|
|
pub fn files_hash(&self) -> Result<u64, Error> {
|
|
|
|
use std::collections::hash_map::DefaultHasher;
|
|
|
|
use std::hash::Hasher;
|
|
|
|
|
|
|
|
let mut hasher = DefaultHasher::new();
|
2017-02-16 16:47:58 +01:00
|
|
|
let files = self.files()?;
|
2017-02-16 20:53:58 +01:00
|
|
|
for file in files {
|
|
|
|
hasher.write(file.to_str().unwrap_or("").as_bytes())
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(hasher.finish())
|
2017-02-16 16:47:58 +01:00
|
|
|
}
|
|
|
|
|
2017-10-20 20:20:41 +02:00
|
|
|
fn last_modification_date(&self) -> Result<u64, Error> {
|
|
|
|
use std::time::{Duration, UNIX_EPOCH};
|
|
|
|
let duration = fs::metadata(&self.path)?.modified()?.duration_since(UNIX_EPOCH).unwrap_or(Duration::default());
|
|
|
|
let timestamp = duration.as_secs() ^ (duration.subsec_nanos() as u64);
|
|
|
|
Ok(timestamp)
|
|
|
|
}
|
|
|
|
|
2017-02-16 16:20:24 +01:00
|
|
|
/// all accounts found in keys directory
|
|
|
|
fn files_content(&self) -> Result<HashMap<PathBuf, SafeAccount>, Error> {
|
|
|
|
// it's not done using one iterator cause
|
|
|
|
// there is an issue with rustc and it takes tooo much time to compile
|
|
|
|
let paths = self.files()?;
|
2016-12-20 16:34:53 +01:00
|
|
|
Ok(paths
|
2017-01-30 11:44:09 +01:00
|
|
|
.into_iter()
|
|
|
|
.filter_map(|path| {
|
|
|
|
let filename = Some(path.file_name().and_then(|n| n.to_str()).expect("Keys have valid UTF8 names only.").to_owned());
|
|
|
|
fs::File::open(path.clone())
|
|
|
|
.map_err(Into::into)
|
|
|
|
.and_then(|file| self.key_manager.read(filename, file))
|
|
|
|
.map_err(|err| {
|
|
|
|
warn!("Invalid key file: {:?} ({})", path, err);
|
|
|
|
err
|
|
|
|
})
|
|
|
|
.map(|account| (path, account))
|
|
|
|
.ok()
|
2016-07-28 20:26:07 +02:00
|
|
|
})
|
|
|
|
.collect()
|
2016-12-20 16:34:53 +01:00
|
|
|
)
|
2016-06-20 00:10:34 +02:00
|
|
|
}
|
2017-01-30 11:44:09 +01:00
|
|
|
|
2018-02-14 14:21:58 +01:00
|
|
|
|
|
|
|
/// insert account with given filename. if the filename is a duplicate of any stored account and dedup is set to
|
|
|
|
/// true, a random suffix is appended to the filename.
|
|
|
|
pub fn insert_with_filename(&self, account: SafeAccount, mut filename: String, dedup: bool) -> Result<SafeAccount, Error> {
|
|
|
|
// path to keyfile
|
|
|
|
let mut keyfile_path = self.path.join(filename.as_str());
|
|
|
|
|
|
|
|
// check for duplicate filename and append random suffix
|
|
|
|
if dedup && keyfile_path.exists() {
|
|
|
|
let suffix = ::random::random_string(4);
|
|
|
|
filename.push_str(&format!("-{}", suffix));
|
|
|
|
keyfile_path.set_file_name(&filename);
|
|
|
|
}
|
|
|
|
|
2017-01-30 11:44:09 +01:00
|
|
|
// update account filename
|
|
|
|
let original_account = account.clone();
|
|
|
|
let mut account = account;
|
2018-02-14 14:21:58 +01:00
|
|
|
account.filename = Some(filename);
|
2017-01-30 11:44:09 +01:00
|
|
|
|
|
|
|
{
|
|
|
|
// save the file
|
|
|
|
let mut file = fs::File::create(&keyfile_path)?;
|
2018-02-12 18:03:37 +01:00
|
|
|
|
2018-02-14 14:21:58 +01:00
|
|
|
// write key content
|
2018-02-12 18:03:37 +01:00
|
|
|
self.key_manager.write(original_account, &mut file).map_err(|e| Error::Custom(format!("{:?}", e)))?;
|
|
|
|
|
|
|
|
file.flush()?;
|
2017-01-30 11:44:09 +01:00
|
|
|
|
|
|
|
if let Err(_) = restrict_permissions_to_owner(keyfile_path.as_path()) {
|
|
|
|
return Err(Error::Io(io::Error::last_os_error()));
|
|
|
|
}
|
2018-02-12 18:03:37 +01:00
|
|
|
|
|
|
|
file.sync_all()?;
|
2017-01-30 11:44:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(account)
|
|
|
|
}
|
|
|
|
|
2017-02-08 13:53:39 +01:00
|
|
|
/// Get key file manager referece
|
2017-01-30 11:44:09 +01:00
|
|
|
pub fn key_manager(&self) -> &T {
|
|
|
|
&self.key_manager
|
|
|
|
}
|
2016-06-20 00:10:34 +02:00
|
|
|
}
|
|
|
|
|
2017-01-30 11:44:09 +01:00
|
|
|
impl<T> KeyDirectory for DiskDirectory<T> where T: KeyFileManager {
|
2016-06-20 00:10:34 +02:00
|
|
|
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
|
2017-02-16 16:20:24 +01:00
|
|
|
let accounts = self.files_content()?
|
2016-06-20 00:10:34 +02:00
|
|
|
.into_iter()
|
|
|
|
.map(|(_, account)| account)
|
|
|
|
.collect();
|
|
|
|
Ok(accounts)
|
|
|
|
}
|
|
|
|
|
2016-12-09 09:31:58 +01:00
|
|
|
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
|
|
|
|
// Disk store handles updates correctly iff filename is the same
|
2018-02-14 14:21:58 +01:00
|
|
|
let filename = account_filename(&account);
|
|
|
|
self.insert_with_filename(account, filename, false)
|
2016-12-09 09:31:58 +01:00
|
|
|
}
|
|
|
|
|
2016-07-25 10:45:45 +02:00
|
|
|
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
|
2018-02-14 14:21:58 +01:00
|
|
|
let filename = account_filename(&account);
|
|
|
|
self.insert_with_filename(account, filename, true)
|
2016-06-20 00:10:34 +02:00
|
|
|
}
|
|
|
|
|
2016-11-30 13:47:14 +01:00
|
|
|
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
|
2016-06-20 00:10:34 +02:00
|
|
|
// enumerate all entries in keystore
|
|
|
|
// and find entry with given address
|
2017-02-16 16:20:24 +01:00
|
|
|
let to_remove = self.files_content()?
|
2016-06-20 00:10:34 +02:00
|
|
|
.into_iter()
|
2017-02-05 16:17:56 +01:00
|
|
|
.find(|&(_, ref acc)| acc.id == account.id && acc.address == account.address);
|
2016-06-20 00:10:34 +02:00
|
|
|
|
|
|
|
// remove it
|
|
|
|
match to_remove {
|
|
|
|
None => Err(Error::InvalidAccount),
|
|
|
|
Some((path, _)) => fs::remove_file(path).map_err(From::from)
|
|
|
|
}
|
|
|
|
}
|
2016-08-11 18:31:28 +02:00
|
|
|
|
|
|
|
fn path(&self) -> Option<&PathBuf> { Some(&self.path) }
|
2017-01-30 11:44:09 +01:00
|
|
|
|
|
|
|
fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> {
|
|
|
|
Some(self)
|
|
|
|
}
|
2017-02-16 20:10:29 +01:00
|
|
|
|
2017-04-11 10:24:56 +02:00
|
|
|
fn unique_repr(&self) -> Result<u64, Error> {
|
2017-10-20 20:20:41 +02:00
|
|
|
self.last_modification_date()
|
2017-02-16 20:10:29 +01:00
|
|
|
}
|
2017-01-30 11:44:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
impl<T> VaultKeyDirectoryProvider for DiskDirectory<T> where T: KeyFileManager {
|
|
|
|
fn create(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
|
|
|
|
let vault_dir = VaultDiskDirectory::create(&self.path, name, key)?;
|
|
|
|
Ok(Box::new(vault_dir))
|
|
|
|
}
|
|
|
|
|
|
|
|
fn open(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
|
|
|
|
let vault_dir = VaultDiskDirectory::at(&self.path, name, key)?;
|
|
|
|
Ok(Box::new(vault_dir))
|
|
|
|
}
|
2017-02-05 16:17:56 +01:00
|
|
|
|
|
|
|
fn list_vaults(&self) -> Result<Vec<String>, Error> {
|
|
|
|
Ok(fs::read_dir(&self.path)?
|
|
|
|
.filter_map(|e| e.ok().map(|e| e.path()))
|
|
|
|
.filter_map(|path| {
|
|
|
|
let mut vault_file_path = path.clone();
|
|
|
|
vault_file_path.push(VAULT_FILE_NAME);
|
|
|
|
if vault_file_path.is_file() {
|
|
|
|
path.file_name().and_then(|f| f.to_str()).map(|f| f.to_owned())
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect())
|
|
|
|
}
|
2017-02-09 16:47:22 +01:00
|
|
|
|
|
|
|
fn vault_meta(&self, name: &str) -> Result<String, Error> {
|
|
|
|
VaultDiskDirectory::meta_at(&self.path, name)
|
|
|
|
}
|
2016-06-20 00:10:34 +02:00
|
|
|
}
|
2016-08-03 17:58:22 +02:00
|
|
|
|
2017-01-30 11:44:09 +01:00
|
|
|
impl KeyFileManager for DiskKeyFileManager {
|
|
|
|
fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error> where T: io::Read {
|
|
|
|
let key_file = json::KeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?;
|
|
|
|
Ok(SafeAccount::from_file(key_file, filename))
|
|
|
|
}
|
|
|
|
|
2017-02-09 16:47:22 +01:00
|
|
|
fn write<T>(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write {
|
|
|
|
// when account is moved back to root directory from vault
|
|
|
|
// => remove vault field from meta
|
|
|
|
account.meta = json::remove_vault_name_from_json_meta(&account.meta)
|
|
|
|
.map_err(|err| Error::Custom(format!("{:?}", err)))?;
|
|
|
|
|
2017-01-30 11:44:09 +01:00
|
|
|
let key_file: json::KeyFile = account.into();
|
|
|
|
key_file.write(writer).map_err(|e| Error::Custom(format!("{:?}", e)))
|
|
|
|
}
|
|
|
|
}
|
2016-08-03 17:58:22 +02:00
|
|
|
|
2018-02-14 14:21:58 +01:00
|
|
|
fn account_filename(account: &SafeAccount) -> String {
|
|
|
|
// build file path
|
|
|
|
account.filename.clone().unwrap_or_else(|| {
|
|
|
|
let timestamp = time::strftime("%Y-%m-%dT%H-%M-%S", &time::now_utc()).expect("Time-format string is valid.");
|
|
|
|
format!("UTC--{}Z--{}", timestamp, Uuid::from(account.id))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-08-03 17:58:22 +02:00
|
|
|
#[cfg(test)]
mod test {
	extern crate tempdir;

	use std::{env, fs};
	use super::{KeyDirectory, RootDiskDirectory, VaultKey};
	use account::SafeAccount;
	use ethkey::{Random, Generator};
	use self::tempdir::TempDir;

	#[test]
	fn should_create_new_account() {
		// given
		let mut dir = env::temp_dir();
		dir.push("ethstore_should_create_new_account");
		let keypair = Random.generate().unwrap();
		let password = "hello world";
		let directory = RootDiskDirectory::create(dir.clone()).unwrap();

		// when
		let account = SafeAccount::create(&keypair, [0u8; 16], password, 1024, "Test".to_owned(), "{}".to_owned());
		let res = directory.insert(account.unwrap());

		// then
		assert!(res.is_ok(), "Should save account succesfuly.");
		assert!(res.unwrap().filename.is_some(), "Filename has been assigned.");

		// cleanup
		let _ = fs::remove_dir_all(dir);
	}

	#[test]
	fn should_handle_duplicate_filenames() {
		// given
		let mut dir = env::temp_dir();
		dir.push("ethstore_should_handle_duplicate_filenames");
		let keypair = Random.generate().unwrap();
		let password = "hello world";
		let directory = RootDiskDirectory::create(dir.clone()).unwrap();

		// when
		let account = SafeAccount::create(&keypair, [0u8; 16], password, 1024, "Test".to_owned(), "{}".to_owned()).unwrap();
		let filename = "test".to_string();
		let dedup = true;

		directory.insert_with_filename(account.clone(), "foo".to_string(), dedup).unwrap();
		let file1 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap();
		let file2 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap();
		let file3 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap();

		// then
		// the first file should have the original names
		assert_eq!(file1, filename);

		// the following duplicate files should have a suffix appended
		assert!(file2 != file3);
		assert_eq!(file2.len(), filename.len() + 5);
		assert_eq!(file3.len(), filename.len() + 5);

		// cleanup
		let _ = fs::remove_dir_all(dir);
	}

	#[test]
	fn should_manage_vaults() {
		// given
		let mut dir = env::temp_dir();
		dir.push("should_create_new_vault");
		let directory = RootDiskDirectory::create(dir.clone()).unwrap();
		let vault_name = "vault";
		let password = "password";

		// then
		assert!(directory.as_vault_provider().is_some());

		// and when
		let before_root_items_count = fs::read_dir(&dir).unwrap().count();
		let vault = directory.as_vault_provider().unwrap().create(vault_name, VaultKey::new(password, 1024));

		// then
		assert!(vault.is_ok());
		let after_root_items_count = fs::read_dir(&dir).unwrap().count();
		assert!(after_root_items_count > before_root_items_count);

		// and when
		let vault = directory.as_vault_provider().unwrap().open(vault_name, VaultKey::new(password, 1024));

		// then
		assert!(vault.is_ok());
		let after_root_items_count2 = fs::read_dir(&dir).unwrap().count();
		assert!(after_root_items_count == after_root_items_count2);

		// cleanup
		let _ = fs::remove_dir_all(dir);
	}

	#[test]
	fn should_list_vaults() {
		// given
		let temp_path = TempDir::new("").unwrap();
		let directory = RootDiskDirectory::create(&temp_path).unwrap();
		let vault_provider = directory.as_vault_provider().unwrap();
		vault_provider.create("vault1", VaultKey::new("password1", 1)).unwrap();
		vault_provider.create("vault2", VaultKey::new("password2", 1)).unwrap();

		// then
		let vaults = vault_provider.list_vaults().unwrap();
		assert_eq!(vaults.len(), 2);
		assert!(vaults.iter().any(|v| &*v == "vault1"));
		assert!(vaults.iter().any(|v| &*v == "vault2"));
	}

	#[test]
	fn hash_of_files() {
		let temp_path = TempDir::new("").unwrap();
		let directory = RootDiskDirectory::create(&temp_path).unwrap();

		// NOTE: `DefaultHasher`'s internal algorithm is explicitly
		// unspecified and may change between Rust releases, so the hash must
		// not be pinned to a magic constant. Instead, assert it is stable
		// while the directory is unchanged...
		let hash = directory.files_hash().expect("Files hash should be calculated ok");
		let hash_again = directory.files_hash().expect("Files hash should be calculated ok");
		assert_eq!(hash, hash_again, "hash of an unchanged directory should be stable");

		let keypair = Random.generate().unwrap();
		let password = "test pass";
		let account = SafeAccount::create(&keypair, [0u8; 16], password, 1024, "Test".to_owned(), "{}".to_owned());
		directory.insert(account.unwrap()).expect("Account should be inserted ok");

		let new_hash = directory.files_hash().expect("New files hash should be calculated ok");

		// ...and changes once the directory content changes.
		assert!(new_hash != hash, "hash of the file list should change once directory content changed");
	}
}
|