[secretstore] migrate to version 4 (#11322)
* secret-store: migrate the db to version 4
* Fix secretstore build
* Fix secretstore build: include ethkey when building with the "accounts" feature
* fix build
* secret-store: actually use new column
* a bunch of fixes
* last nits
* Apply suggestions from code review

  Co-Authored-By: Niklas Adolfsson <niklasadolfsson1@gmail.com>
* secret-store: move db stuff to secret-store as per Anton's request
parent: 4fa78e0537
commit: ae74e8df78
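Before the hunks: this PR replaces the version byte that used to live inside the database with a plain-text `db_version` file next to the secret-store DB (missing file means legacy version 3, target is version 4). A minimal, std-only sketch of that scheme follows; the names mirror the new `secret-store/src/migration.rs` further down, but the `main` and the temp-dir location are illustrative assumptions, not part of the change:

use std::fs;
use std::io::{Error, ErrorKind, Read, Write};
use std::path::{Path, PathBuf};

const CURRENT_VERSION: u8 = 4;            // version written after a successful migration
const DEFAULT_VERSION: u8 = 3;            // assumed when no version file exists yet
const VERSION_FILE_NAME: &str = "db_version";

fn version_file(dir: &Path) -> PathBuf {
	dir.join(VERSION_FILE_NAME)
}

fn current_version(dir: &Path) -> Result<u8, Error> {
	match fs::File::open(version_file(dir)) {
		Err(ref e) if e.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION),
		Err(e) => Err(e),
		Ok(mut file) => {
			let mut s = String::new();
			file.read_to_string(&mut s)?;
			s.trim().parse().map_err(|_| Error::new(ErrorKind::InvalidData, "unreadable db version"))
		},
	}
}

fn update_version(dir: &Path) -> Result<(), Error> {
	fs::File::create(version_file(dir))?.write_all(CURRENT_VERSION.to_string().as_bytes())
}

fn main() -> Result<(), Error> {
	// Illustration only: use a temp directory instead of the real secret-store data path.
	let dir = std::env::temp_dir();
	println!("found secret-store DB version {}", current_version(&dir)?);
	update_version(&dir)?;
	println!("version file now says {}", current_version(&dir)?);
	Ok(())
}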
@@ -20,6 +20,3 @@
 mod impls;
 
 pub use self::impls::{open_db_light, restoration_db_handler, migrate};
-
-#[cfg(feature = "secretstore")]
-pub use self::impls::open_secretstore_db;
@@ -217,11 +217,11 @@ pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> R
 
 	// Further migrations
 	if version < CURRENT_VERSION && exists(&db_path) {
-		println!("Migrating database from version {} to {}", version, CURRENT_VERSION);
+		info!(target: "migration", "Migrating database from version {} to {}", version, CURRENT_VERSION);
 		migrate_database(version, &db_path, consolidated_database_migrations(&compaction_profile)?)?;
 
 		if version < BLOOMS_DB_VERSION {
-			println!("Migrating blooms to blooms-db...");
+			info!(target: "migration", "Migrating blooms to blooms-db...");
 			let db_config = DatabaseConfig {
 				max_open_files: 64,
 				compaction: compaction_profile,
@@ -232,7 +232,7 @@ pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> R
 			migrate_blooms(&db_path, &db_config).map_err(Error::BloomsDB)?;
 		}
 
-		println!("Migration finished");
+		info!(target: "migration", "Migration finished");
 	}
 
 	// update version file.
@@ -18,6 +18,9 @@ extern crate kvdb_rocksdb;
 extern crate migration_rocksdb;
 extern crate ethcore_blockchain;
 
+#[cfg(test)]
+extern crate tempdir;
+
 use std::{io, fs};
 use std::sync::Arc;
 use std::path::Path;
@@ -56,17 +59,6 @@ impl BlockChainDB for AppDB {
 	}
 }
 
-/// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path.
-#[cfg(feature = "secretstore")]
-pub fn open_secretstore_db(data_path: &str) -> Result<Arc<dyn KeyValueDB>, String> {
-	use std::path::PathBuf;
-
-	let mut db_path = PathBuf::from(data_path);
-	db_path.push("db");
-	let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;
-	Ok(Arc::new(Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?))
-}
-
 /// Create a restoration db handler using the config generated by `client_path` and `client_config`.
 pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) -> Box<dyn BlockChainDBHandler> {
 	let client_db_config = helpers::client_db_config(client_path, client_config);
@@ -124,7 +124,6 @@ mod server {
 	use ethcore_secretstore;
 	use parity_crypto::publickey::KeyPair;
 	use ansi_term::Colour::{Red, White};
-	use db;
 	use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress, Executor};
 
 	fn into_service_contract_address(address: ContractAddress) -> ethcore_secretstore::ContractAddress {
@@ -136,13 +135,13 @@ mod server {
 
 	/// Key server
 	pub struct KeyServer {
-		_key_server: Box<ethcore_secretstore::KeyServer>,
+		_key_server: Box<dyn ethcore_secretstore::KeyServer>,
 	}
 
 	impl KeyServer {
 		/// Create new key server
 		pub fn new(mut conf: Configuration, deps: Dependencies, executor: Executor) -> Result<Self, String> {
-			let self_secret: Arc<ethcore_secretstore::NodeKeyPair> = match conf.self_secret.take() {
+			let self_secret: Arc<dyn ethcore_secretstore::NodeKeyPair> = match conf.self_secret.take() {
 				Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new(
 					KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)),
 				#[cfg(feature = "accounts")]
@@ -203,7 +202,7 @@ mod server {
 
 			cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone());
 
-			let db = db::open_secretstore_db(&conf.data_path)?;
+			let db = ethcore_secretstore::open_secretstore_db(&conf.data_path)?;
 			let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf, db, executor)
 				.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;
 
@@ -17,10 +17,12 @@ ethcore-accounts = { path = "../accounts", optional = true}
 ethcore-call-contract = { path = "../ethcore/call-contract" }
 ethcore-sync = { path = "../ethcore/sync" }
 ethereum-types = "0.8.0"
+ethkey = { path = "../accounts/ethkey", optional = true }
 futures = "0.1"
 hyper = { version = "0.12", default-features = false }
 keccak-hash = "0.4.0"
 kvdb = "0.1"
+kvdb-rocksdb = "0.2.0"
 lazy_static = "1.0"
 log = "0.4"
 parity-bytes = "0.1"
@@ -48,4 +50,4 @@ tempdir = "0.3"
 kvdb-rocksdb = "0.2.0"
 
 [features]
-accounts = ["ethcore-accounts"]
+accounts = ["ethcore-accounts", "ethkey"]
@@ -24,11 +24,6 @@ use kvdb::KeyValueDB;
 use types::{Error, ServerKeyId, NodeId};
 use serialization::{SerializablePublic, SerializableSecret, SerializableH256, SerializableAddress};
 
-/// Key of version value.
-const DB_META_KEY_VERSION: &'static [u8; 7] = b"version";
-/// Current db version.
-const CURRENT_VERSION: u8 = 3;
-
 /// Encrypted key share, stored by key storage on the single key server.
 #[derive(Debug, Default, Clone, PartialEq)]
 pub struct DocumentKeyShare {
@@ -116,26 +111,7 @@ struct SerializableDocumentKeyShareVersionV3 {
 impl PersistentKeyStorage {
 	/// Create new persistent document encryption keys storage
 	pub fn new(db: Arc<dyn KeyValueDB>) -> Result<Self, Error> {
-		let db = upgrade_db(db)?;
-
-		Ok(PersistentKeyStorage {
-			db: db,
-		})
-	}
-}
-
-fn upgrade_db(db: Arc<dyn KeyValueDB>) -> Result<Arc<dyn KeyValueDB>, Error> {
-	let version = db.get(None, DB_META_KEY_VERSION)?;
-	let version = version.and_then(|v| v.get(0).cloned());
-	match version {
-		None => {
-			let mut batch = db.transaction();
-			batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
-			db.write(batch)?;
-			Ok(db)
-		},
-		Some(CURRENT_VERSION) => Ok(db),
-		_ => Err(Error::Database(format!("unsupported SecretStore database version: {:?}", version))),
+		Ok(Self { db })
 	}
 }
 
@@ -144,7 +120,7 @@ impl KeyStorage for PersistentKeyStorage {
 		let key: SerializableDocumentKeyShareV3 = key.into();
 		let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
 		let mut batch = self.db.transaction();
-		batch.put(None, document.as_bytes(), &key);
+		batch.put(Some(0), document.as_bytes(), &key);
 		self.db.write(batch).map_err(Into::into)
 	}
 
@@ -153,7 +129,7 @@ impl KeyStorage for PersistentKeyStorage {
 	}
 
 	fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
-		self.db.get(None, document.as_bytes())
+		self.db.get(Some(0), document.as_bytes())
 			.map_err(|e| Error::Database(e.to_string()))
 			.and_then(|key| match key {
 				None => Ok(None),
@@ -166,28 +142,28 @@ impl KeyStorage for PersistentKeyStorage {
 
 	fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
 		let mut batch = self.db.transaction();
-		batch.delete(None, document.as_bytes());
+		batch.delete(Some(0), document.as_bytes());
 		self.db.write(batch).map_err(Into::into)
 	}
 
 	fn clear(&self) -> Result<(), Error> {
 		let mut batch = self.db.transaction();
 		for (key, _) in self.iter() {
-			batch.delete(None, key.as_bytes());
+			batch.delete(Some(0), key.as_bytes());
 		}
 		self.db.write(batch)
 			.map_err(|e| Error::Database(e.to_string()))
 	}
 
 	fn contains(&self, document: &ServerKeyId) -> bool {
-		self.db.get(None, document.as_bytes())
+		self.db.get(Some(0), document.as_bytes())
 			.map(|k| k.is_some())
 			.unwrap_or(false)
 	}
 
 	fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
 		Box::new(PersistentKeyStorageIterator {
-			iter: self.db.iter(None),
+			iter: self.db.iter(Some(0)),
 		})
 	}
 }
@@ -290,14 +266,12 @@ impl From<SerializableDocumentKeyShareV3> for DocumentKeyShare {
 
 #[cfg(test)]
 pub mod tests {
-	extern crate tempdir;
-
 	use std::collections::HashMap;
 	use std::sync::Arc;
 	use parking_lot::RwLock;
-	use self::tempdir::TempDir;
+	use tempdir::TempDir;
 	use crypto::publickey::{Random, Generator, Public};
-	use kvdb_rocksdb::Database;
+	use kvdb_rocksdb::{Database, DatabaseConfig};
 	use types::{Error, ServerKeyId};
 	use super::{KeyStorage, PersistentKeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
 
@@ -376,7 +350,8 @@ pub mod tests {
 		};
 		let key3 = ServerKeyId::from_low_u64_be(3);
 
-		let db = Database::open_default(&tempdir.path().display().to_string()).unwrap();
+		let db_config = DatabaseConfig::with_columns(Some(1));
+		let db = Database::open(&db_config, &tempdir.path().display().to_string()).unwrap();
 
 		let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
 		key_storage.insert(key1.clone(), value1.clone()).unwrap();
@@ -386,7 +361,7 @@ pub mod tests {
 		assert_eq!(key_storage.get(&key3), Ok(None));
 		drop(key_storage);
 
-		let db = Database::open_default(&tempdir.path().display().to_string()).unwrap();
+		let db = Database::open(&db_config, &tempdir.path().display().to_string()).unwrap();
 
 		let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
 		assert_eq!(key_storage.get(&key1), Ok(Some(value1)));
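The `None` → `Some(0)` changes above move the key shares from RocksDB's default column into an explicit column 0. A minimal, self-contained sketch of that access pattern, assuming kvdb-rocksdb 0.2 as pinned in the Cargo.toml hunk above and a throwaway path chosen only for illustration:

extern crate kvdb_rocksdb;

use kvdb_rocksdb::{Database, DatabaseConfig};

fn main() -> Result<(), String> {
	// One column is allocated and addressed as `Some(0)`; `None` would be the default column.
	let config = DatabaseConfig::with_columns(Some(1));
	let db = Database::open(&config, "/tmp/secretstore-demo")
		.map_err(|e| format!("Error opening database: {:?}", e))?;

	// Writes go through a transaction, mirroring `PersistentKeyStorage::insert` above.
	let mut batch = db.transaction();
	batch.put(Some(0), b"document-id", b"serialized-key-share");
	db.write(batch).map_err(|e| format!("Error writing batch: {:?}", e))?;

	// Reads address the same column.
	let value = db.get(Some(0), b"document-id").map_err(|e| format!("Error reading: {:?}", e))?;
	assert!(value.is_some());
	Ok(())
}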
@@ -25,6 +25,7 @@ extern crate ethereum_types;
 extern crate hyper;
 extern crate keccak_hash as hash;
 extern crate kvdb;
+extern crate kvdb_rocksdb;
 extern crate parity_bytes as bytes;
 extern crate parity_crypto as crypto;
 extern crate parity_runtime;
@@ -53,12 +54,12 @@ extern crate lazy_static;
 #[macro_use]
 extern crate log;
 
-#[cfg(test)]
+#[cfg(any(test, feature = "accounts"))]
 extern crate ethkey;
 #[cfg(test)]
 extern crate env_logger;
 #[cfg(test)]
-extern crate kvdb_rocksdb;
 extern crate tempdir;
 
 #[cfg(feature = "accounts")]
 extern crate ethcore_accounts as accounts;
@@ -76,9 +77,11 @@ mod key_server_set;
 mod node_key_pair;
 mod listener;
 mod trusted_client;
+mod migration;
 
 use std::sync::Arc;
 use kvdb::KeyValueDB;
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use ethcore::client::Client;
 use ethcore::miner::Miner;
 use sync::SyncProvider;
@@ -91,6 +94,20 @@ pub use self::node_key_pair::PlainNodeKeyPair;
 #[cfg(feature = "accounts")]
 pub use self::node_key_pair::KeyStoreNodeKeyPair;
 
+/// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path.
+pub fn open_secretstore_db(data_path: &str) -> Result<Arc<dyn KeyValueDB>, String> {
+	use std::path::PathBuf;
+
+	migration::upgrade_db(data_path).map_err(|e| e.to_string())?;
+
+	let mut db_path = PathBuf::from(data_path);
+	db_path.push("db");
+	let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;
+
+	let config = DatabaseConfig::with_columns(Some(1));
+	Ok(Arc::new(Database::open(&config, &db_path).map_err(|e| format!("Error opening database: {:?}", e))?))
+}
+
 /// Start new key server instance
 pub fn start(client: Arc<Client>, sync: Arc<dyn SyncProvider>, miner: Arc<Miner>, self_key_pair: Arc<dyn NodeKeyPair>, mut config: ServiceConfiguration,
 	db: Arc<dyn KeyValueDB>, executor: Executor) -> Result<Box<dyn KeyServer>, Error>
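The new `open_secretstore_db` above runs the migration first, then derives the DB directory one level beneath the secret-store data path and rejects non-UTF-8 paths before opening anything. A small std-only sketch of just that path handling (the `main` is an illustrative assumption):

use std::path::PathBuf;

fn secretstore_db_path(data_path: &str) -> Result<String, String> {
	let mut db_path = PathBuf::from(data_path);
	db_path.push("db");
	db_path.to_str()
		.map(|s| s.to_string())
		.ok_or_else(|| "Invalid secretstore path".to_string())
}

fn main() {
	// e.g. "/data/secretstore" -> "/data/secretstore/db" (separator depends on the platform)
	println!("{:?}", secretstore_db_path("/data/secretstore"));
}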
secret-store/src/migration.rs (new file, 198 lines)
@@ -0,0 +1,198 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Secret Store DB migration module.

use std::fmt::{Display, Error as FmtError, Formatter};
use std::fs;
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read as _, Write as _};
use std::path::PathBuf;

use kvdb::DBTransaction;
use kvdb_rocksdb::{Database, DatabaseConfig};

/// We used to store the version in the database (until version 4).
const LEGACY_DB_META_KEY_VERSION: &[u8; 7] = b"version";
/// Current db version.
const CURRENT_VERSION: u8 = 4;
/// Database is assumed to be at the default version, when no version file is found.
const DEFAULT_VERSION: u8 = 3;
/// Version file name.
const VERSION_FILE_NAME: &str = "db_version";

/// Migration related errors.
#[derive(Debug)]
pub enum Error {
	/// Returned when current version cannot be read or guessed.
	UnknownDatabaseVersion,
	/// Existing DB is newer than the known one.
	FutureDBVersion,
	/// Migration was completed successfully,
	/// but there was a problem with io.
	Io(IoError),
}

impl Display for Error {
	fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
		let out = match *self {
			Error::UnknownDatabaseVersion =>
				"Current Secret Store database version cannot be read".into(),
			Error::FutureDBVersion =>
				"Secret Store database was created with newer client version.\
				Upgrade your client or delete DB and resync.".into(),
			Error::Io(ref err) =>
				format!("Unexpected io error on Secret Store database migration: {}.", err),
		};
		write!(f, "{}", out)
	}
}

impl From<IoError> for Error {
	fn from(err: IoError) -> Self {
		Error::Io(err)
	}
}

// Moves "default" column to column 0 in preparation for a kvdb-rocksdb 0.3 migration.
fn migrate_to_v4(parent_dir: &str) -> Result<(), Error> {
	// Naïve implementation until
	// https://github.com/facebook/rocksdb/issues/6130 is resolved
	let old_db_config = DatabaseConfig::with_columns(Some(1));
	let new_db_config = DatabaseConfig::with_columns(Some(1));
	const BATCH_SIZE: usize = 1024;

	let old_dir = db_dir(parent_dir);
	let new_dir = migration_dir(parent_dir);
	let old_db = Database::open(&old_db_config, &old_dir)?;
	let new_db = Database::open(&new_db_config, &new_dir)?;

	const OLD_COLUMN: Option<u32> = None;
	const NEW_COLUMN: Option<u32> = Some(0);

	// remove legacy version key
	{
		let mut batch = DBTransaction::with_capacity(1);
		batch.delete(OLD_COLUMN, LEGACY_DB_META_KEY_VERSION);
		if let Err(err) = old_db.write(batch) {
			error!(target: "migration", "Failed to delete db version {}", &err);
			return Err(err.into());
		}
	}

	let mut batch = DBTransaction::with_capacity(BATCH_SIZE);
	for (i, (key, value)) in old_db.iter(OLD_COLUMN).enumerate() {
		batch.put(NEW_COLUMN, &key, &value);
		if i % BATCH_SIZE == 0 {
			new_db.write(batch)?;
			batch = DBTransaction::with_capacity(BATCH_SIZE);
			info!(target: "migration", "Migrating Secret Store DB: {} keys written", i);
		}
	}
	new_db.write(batch)?;
	drop(new_db);
	old_db.restore(&new_dir)?;

	info!(target: "migration", "Secret Store migration finished");

	Ok(())
}

/// Apply all migrations if possible.
pub fn upgrade_db(db_path: &str) -> Result<(), Error> {
	match current_version(db_path)? {
		old_version if old_version < CURRENT_VERSION => {
			migrate_to_v4(db_path)?;
			update_version(db_path)?;
			Ok(())
		},
		CURRENT_VERSION => Ok(()),
		_ => Err(Error::FutureDBVersion),
	}
}

fn db_dir(path: &str) -> String {
	let mut dir = PathBuf::from(path);
	dir.push("db");
	dir.to_string_lossy().to_string()
}

fn migration_dir(path: &str) -> String {
	let mut dir = PathBuf::from(path);
	dir.push("migration");
	dir.to_string_lossy().to_string()
}

/// Returns the version file path.
fn version_file_path(path: &str) -> PathBuf {
	let mut file_path = PathBuf::from(path);
	file_path.push(VERSION_FILE_NAME);
	file_path
}

/// Reads current database version from the file at given path.
/// If the file does not exist returns `DEFAULT_VERSION`.
fn current_version(path: &str) -> Result<u8, Error> {
	match fs::File::open(version_file_path(path)) {
		Err(ref err) if err.kind() == IoErrorKind::NotFound => Ok(DEFAULT_VERSION),
		Err(err) => Err(err.into()),
		Ok(mut file) => {
			let mut s = String::new();
			file.read_to_string(&mut s)?;
			u8::from_str_radix(&s, 10).map_err(|_| Error::UnknownDatabaseVersion)
		},
	}
}

/// Writes current database version to the file.
/// Creates a new file if the version file does not exist yet.
fn update_version(path: &str) -> Result<(), Error> {
	let mut file = fs::File::create(version_file_path(path))?;
	file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
	Ok(())
}

#[cfg(test)]
mod tests {
	use super::*;
	use tempdir::TempDir;

	#[test]
	fn migration_works() -> Result<(), Error> {
		let parent = TempDir::new("secret_store_migration")?.into_path();

		let mut db_path = parent.clone();
		db_path.push("db");
		let db_path = db_path.to_str().unwrap();
		let parent_path = parent.to_str().unwrap();

		let old_db = Database::open(&DatabaseConfig::with_columns(None), db_path)?;

		let mut batch = old_db.transaction();
		batch.put(None, b"key1", b"value1");
		batch.put(None, b"key2", b"value2");
		old_db.write(batch)?;
		drop(old_db);

		upgrade_db(parent_path)?;
		let migrated = Database::open(&DatabaseConfig::with_columns(Some(1)), db_path)?;

		assert_eq!(migrated.get(Some(0), b"key1")?.expect("key1"), b"value1".to_vec());
		assert_eq!(migrated.get(Some(0), b"key2")?.expect("key2"), b"value2".to_vec());

		Ok(())
	}
}
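A tiny control-flow sketch of `upgrade_db` above, stripped of I/O and RocksDB so the three branches are easy to see (the enum and `main` are illustrative, not part of the module):

const CURRENT_VERSION: u8 = 4;

#[derive(Debug, PartialEq)]
enum Outcome {
	Migrated,      // version on disk was older: run migrate_to_v4, then write the version file
	UpToDate,      // already at CURRENT_VERSION: nothing to do
	FutureVersion, // DB written by a newer client: refuse to touch it
}

fn upgrade_decision(found: u8) -> Outcome {
	match found {
		v if v < CURRENT_VERSION => Outcome::Migrated,
		CURRENT_VERSION => Outcome::UpToDate,
		_ => Outcome::FutureVersion,
	}
}

fn main() {
	assert_eq!(upgrade_decision(3), Outcome::Migrated);
	assert_eq!(upgrade_decision(4), Outcome::UpToDate);
	assert_eq!(upgrade_decision(5), Outcome::FutureVersion);
}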
@@ -74,10 +74,10 @@ mod accounts {
 		pub fn new(account_provider: Arc<AccountProvider>, address: Address, password: Password) -> Result<Self, EthKeyError> {
 			let public = account_provider.account_public(address.clone(), &password).map_err(|e| EthKeyError::Custom(format!("{}", e)))?;
 			Ok(KeyStoreNodeKeyPair {
-				account_provider: account_provider,
-				address: address,
-				public: public,
-				password: password,
+				account_provider,
+				address,
+				public,
+				password,
 			})
 		}
 	}