// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::fs;
use std::fs::File;
use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError};
use std::sync::Arc;
use journaldb::Algorithm;
use migr::{self, Manager as MigrationManager, Config as MigrationConfig, Migration};
use kvdb;
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
use ethcore::migrations;
use ethcore::db;
use ethcore::migrations::Extract;

/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 12;
/// First version of the consolidated database.
const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of database at once.
const BATCH_SIZE: usize = 1024;
/// Version file name.
const VERSION_FILE_NAME: &'static str = "db_version";
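
// Versions below CONSOLIDATION_VERSION belong to the era of separate blocks,
// extras and state databases; `migrate` below first brings those up to
// CONSOLIDATION_VERSION and merges them into one database, then applies the
// remaining steps up to CURRENT_VERSION.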

/// Migration related errors.
#[derive(Debug)]
pub enum Error {
	/// Returned when current version cannot be read or guessed.
	UnknownDatabaseVersion,
	/// Migration does not support existing pruning algorithm.
	UnsupportedPruningMethod,
	/// Existing DB is newer than the known one.
	FutureDBVersion,
	/// Migration is not possible.
	MigrationImpossible,
	/// Migration unexpectedly failed.
	MigrationFailed,
	/// Internal migration error.
	Internal(migr::Error),
	/// Migration was completed successfully,
	/// but there was a problem with io.
	Io(IoError),
}

impl Display for Error {
	fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
		let out = match *self {
			Error::UnknownDatabaseVersion => "Current database version cannot be read".into(),
			Error::UnsupportedPruningMethod => "Unsupported pruning method for database migration. Delete DB and resync.".into(),
			Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(),
			Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION),
			Error::MigrationFailed => "Database migration unexpectedly failed".into(),
			Error::Internal(ref err) => format!("{}", err),
			Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err),
		};

		write!(f, "{}", out)
	}
}

impl From<IoError> for Error {
	fn from(err: IoError) -> Self {
		Error::Io(err)
	}
}

impl From<migr::Error> for Error {
	fn from(err: migr::Error) -> Self {
		match err.into() {
			migr::ErrorKind::Io(e) => Error::Io(e),
			err => Error::Internal(err.into()),
		}
	}
}
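
// These `From` impls are what let the `?` operator in the functions below
// convert io and migration failures into this module's `Error`, e.g.
//
//     fs::create_dir_all(path)?;               // IoError -> Error::Io
//     migrations.execute(&db_path, version)?;  // migr::Error -> Error::Internal / Error::Io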

/// Returns the version file path.
fn version_file_path(path: &Path) -> PathBuf {
	let mut file_path = path.to_owned();
	file_path.push(VERSION_FILE_NAME);
	file_path
}

/// Reads current database version from the file at given path.
/// If the file does not exist returns `DEFAULT_VERSION`.
fn current_version(path: &Path) -> Result<u32, Error> {
	match File::open(version_file_path(path)) {
		Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION),
		Err(_) => Err(Error::UnknownDatabaseVersion),
		Ok(mut file) => {
			let mut s = String::new();
			file.read_to_string(&mut s).map_err(|_| Error::UnknownDatabaseVersion)?;
			u32::from_str_radix(&s, 10).map_err(|_| Error::UnknownDatabaseVersion)
		},
	}
}

/// Writes current database version to the file.
/// Creates a new file if the version file does not exist yet.
fn update_version(path: &Path) -> Result<(), Error> {
	fs::create_dir_all(path)?;
	let mut file = File::create(version_file_path(path))?;
	file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
	Ok(())
}
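
// Round trip sketch (illustrative directory): `update_version` writes the
// decimal string "12" into `<dir>/db_version`, and a later `current_version`
// call on the same directory parses it back to `CURRENT_VERSION`.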

/// Consolidated database path.
fn consolidated_database_path(path: &Path) -> PathBuf {
	let mut state_path = path.to_owned();
	state_path.push("db");
	state_path
}

/// Temporary database backup path.
fn backup_database_path(path: &Path) -> PathBuf {
	let mut backup_path = path.to_owned();
	backup_path.pop();
	backup_path.push("temp_backup");
	backup_path
}
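
// Illustrative layout (hypothetical paths): for a database at
// `/chains/foo/db`, `backup_database_path` pops the final component and
// yields the sibling directory `/chains/foo/temp_backup`.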

/// Default migration settings.
pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig {
	MigrationConfig {
		batch_size: BATCH_SIZE,
		compaction_profile: *compaction_profile,
	}
}

/// Migrations on the consolidated database.
fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
	let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
	manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)?;
	manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?;
	manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?;
	Ok(manager)
}
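
// Migrations run in the order they were added: a database reporting version 9
// goes through ToV10, TO_V11 and TO_V12 in sequence, while one already at 11
// only needs the final step; the manager picks the starting point from the
// version handed to `execute`.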

/// Consolidates legacy databases into a single one.
fn consolidate_database(
	old_db_path: PathBuf,
	new_db_path: PathBuf,
	column: Option<u32>,
	extract: Extract,
	compaction_profile: &CompactionProfile) -> Result<(), Error> {
	fn db_error(e: kvdb::Error) -> Error {
		warn!("Cannot open Database for consolidation: {:?}", e);
		Error::MigrationFailed
	}

	let mut migration = migrations::ToV9::new(column, extract);
	let config = default_migration_settings(compaction_profile);
	let mut db_config = DatabaseConfig {
		max_open_files: 64,
		cache_sizes: Default::default(),
		compaction: config.compaction_profile,
		columns: None,
		wal: true,
	};

	let old_path_str = old_db_path.to_str().ok_or(Error::MigrationImpossible)?;
	let new_path_str = new_db_path.to_str().ok_or(Error::MigrationImpossible)?;

	let cur_db = Arc::new(Database::open(&db_config, old_path_str).map_err(db_error)?);
	// open new DB with proper number of columns
	db_config.columns = migration.columns();
	let mut new_db = Database::open(&db_config, new_path_str).map_err(db_error)?;

	// Migrate to new database (default column only)
	migration.migrate(cur_db, &config, &mut new_db, None)?;

	Ok(())
}
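
// Each call copies one legacy database into a single column of the unified
// DB; `migrate` below invokes it once per legacy store (headers, bodies,
// extras, state and traces).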

/// Migrates database at given position with given migration rules.
fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> {
	// check if migration is needed
	if !migrations.is_needed(version) {
		return Ok(())
	}

	let backup_path = backup_database_path(&db_path);
	// remove the backup dir if it exists
	let _ = fs::remove_dir_all(&backup_path);

	// migrate old database to the new one
	let temp_path = migrations.execute(&db_path, version)?;

	// completely in-place migration leads to the paths being equal.
	// in that case, no need to shuffle directories.
	if temp_path == db_path { return Ok(()) }

	// create backup
	fs::rename(&db_path, &backup_path)?;

	// replace the old database with the new one
	if let Err(err) = fs::rename(&temp_path, &db_path) {
		// if something went wrong, bring back backup
		fs::rename(&backup_path, &db_path)?;
		return Err(err.into());
	}

	// remove backup
	fs::remove_dir_all(&backup_path).map_err(Into::into)
}
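
// The directory shuffle above, on illustrative (hypothetical) paths for a
// database at `/chains/db`:
//
//     execute:  migrated copy written to a temporary dir, e.g. `/chains/<temp>`
//     backup:   /chains/db      --rename-->  /chains/temp_backup
//     swap:     /chains/<temp>  --rename-->  /chains/db
//     cleanup:  remove /chains/temp_backup
//
// If the swap fails the backup is renamed back into place, so the original
// database is only discarded once the new one has landed.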

fn exists(path: &Path) -> bool {
	fs::metadata(path).is_ok()
}

/// Migrates the database.
pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> {
	// read version file.
	let version = current_version(path)?;

	// migrate the databases.
	// main db directory may already exist, so let's check if we have a blocks dir
	if version > CURRENT_VERSION {
		return Err(Error::FutureDBVersion);
	}

	// We are in the latest version, yay!
	if version == CURRENT_VERSION {
		return Ok(())
	}

	// Perform pre-consolidation migrations
	if version < CONSOLIDATION_VERSION && exists(&legacy::blocks_database_path(path)) {
		println!("Migrating database from version {} to {}", version, CONSOLIDATION_VERSION);

		migrate_database(version, legacy::extras_database_path(path), legacy::extras_database_migrations(&compaction_profile)?)?;
		migrate_database(version, legacy::state_database_path(path), legacy::state_database_migrations(pruning, &compaction_profile)?)?;
		migrate_database(version, legacy::blocks_database_path(path), legacy::blocks_database_migrations(&compaction_profile)?)?;

		let db_path = consolidated_database_path(path);
		// Remove the database dir (it shouldn't exist anyway, but it might if a previous migration was interrupted)
		let _ = fs::remove_dir_all(db_path.clone());
		consolidate_database(legacy::blocks_database_path(path), db_path.clone(), db::COL_HEADERS, Extract::Header, &compaction_profile)?;
		consolidate_database(legacy::blocks_database_path(path), db_path.clone(), db::COL_BODIES, Extract::Body, &compaction_profile)?;
		consolidate_database(legacy::extras_database_path(path), db_path.clone(), db::COL_EXTRA, Extract::All, &compaction_profile)?;
		consolidate_database(legacy::state_database_path(path), db_path.clone(), db::COL_STATE, Extract::All, &compaction_profile)?;
		consolidate_database(legacy::trace_database_path(path), db_path.clone(), db::COL_TRACE, Extract::All, &compaction_profile)?;
		let _ = fs::remove_dir_all(legacy::blocks_database_path(path));
		let _ = fs::remove_dir_all(legacy::extras_database_path(path));
		let _ = fs::remove_dir_all(legacy::state_database_path(path));
		let _ = fs::remove_dir_all(legacy::trace_database_path(path));
		println!("Migration finished");
	}

	// update version so we can apply post-consolidation migrations.
	let version = ::std::cmp::max(CONSOLIDATION_VERSION, version);

	// Further migrations
	if version >= CONSOLIDATION_VERSION && version < CURRENT_VERSION && exists(&consolidated_database_path(path)) {
		println!("Migrating database from version {} to {}", version, CURRENT_VERSION);
		migrate_database(version, consolidated_database_path(path), consolidated_database_migrations(&compaction_profile)?)?;
		println!("Migration finished");
	}

	// update version file.
	update_version(path)
}
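
// Illustrative call site (hypothetical path; the real caller wires this up
// from the CLI configuration), assuming `CompactionProfile::default()` is
// available:
//
//     migrate(Path::new("/home/user/.parity/chains/ethereum"),
//             Algorithm::OverlayRecent, CompactionProfile::default())?;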

/// Old migrations utilities.
mod legacy {
	use super::*;
	use std::path::{Path, PathBuf};
	use migr::{Manager as MigrationManager};
	use kvdb_rocksdb::CompactionProfile;
	use ethcore::migrations;

	/// Blocks database path.
	pub fn blocks_database_path(path: &Path) -> PathBuf {
		let mut blocks_path = path.to_owned();
		blocks_path.push("blocks");
		blocks_path
	}

	/// Extras database path.
	pub fn extras_database_path(path: &Path) -> PathBuf {
		let mut extras_path = path.to_owned();
		extras_path.push("extras");
		extras_path
	}

	/// State database path.
	pub fn state_database_path(path: &Path) -> PathBuf {
		let mut state_path = path.to_owned();
		state_path.push("state");
		state_path
	}

	/// Trace database path.
	pub fn trace_database_path(path: &Path) -> PathBuf {
		let mut trace_path = path.to_owned();
		trace_path.push("tracedb");
		trace_path
	}

	/// Migrations on the blocks database.
	pub fn blocks_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
		let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
		manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible)?;
		Ok(manager)
	}

	/// Migrations on the extras database.
	pub fn extras_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
		let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
		manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible)?;
		Ok(manager)
	}

	/// Migrations on the state database.
	pub fn state_database_migrations(pruning: Algorithm, compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
		let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
		let res = match pruning {
			Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
			Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
			_ => return Err(Error::UnsupportedPruningMethod),
		};

		res.map_err(|_| Error::MigrationImpossible)?;
		Ok(manager)
	}
}
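
// A minimal test sketch for the pure helpers above (illustrative only: the
// temp-dir names are arbitrary and the path assertions assume Unix-style
// separators).
#[cfg(test)]
mod tests {
	use std::env;
	use std::fs;
	use std::path::Path;
	use super::{backup_database_path, consolidated_database_path, current_version,
		update_version, version_file_path, CURRENT_VERSION, DEFAULT_VERSION, VERSION_FILE_NAME};

	#[test]
	fn version_file_path_appends_file_name() {
		let path = version_file_path(Path::new("/tmp/chain"));
		assert_eq!(path, Path::new("/tmp/chain").join(VERSION_FILE_NAME));
	}

	#[test]
	fn backup_path_is_a_sibling_directory() {
		// `pop` drops the db dir itself, so the backup lands next to it.
		assert_eq!(backup_database_path(Path::new("/tmp/chain/db")), Path::new("/tmp/chain/temp_backup"));
	}

	#[test]
	fn consolidated_database_lives_under_db() {
		assert_eq!(consolidated_database_path(Path::new("/tmp/chain")), Path::new("/tmp/chain/db"));
	}

	#[test]
	fn missing_version_file_yields_default() {
		let dir = env::temp_dir().join("parity_migration_missing_version");
		let _ = fs::remove_dir_all(&dir);
		assert_eq!(current_version(&dir).unwrap(), DEFAULT_VERSION);
	}

	#[test]
	fn version_round_trips_through_the_file() {
		let dir = env::temp_dir().join("parity_migration_version_roundtrip");
		let _ = fs::remove_dir_all(&dir);
		update_version(&dir).unwrap();
		assert_eq!(current_version(&dir).unwrap(), CURRENT_VERSION);
		let _ = fs::remove_dir_all(&dir);
	}
}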