// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fs;
use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError};

use ethcore;
use ethcore::client::DatabaseCompactionProfile;

use super::blooms::migrate_blooms;
use super::helpers;
use super::kvdb_rocksdb::{CompactionProfile, DatabaseConfig};
use super::migration_rocksdb::{Manager as MigrationManager, Config as MigrationConfig, ChangeColumns};

/// The migration from v10 to v11.
/// Adds a column for node info.
pub const TO_V11: ChangeColumns = ChangeColumns {
	// number of database columns before this migration
	pre_columns: Some(6),
	// one new column is added for node info
	post_columns: Some(7),
	// database version this migration upgrades to
	version: 11,
};

/// The migration from v11 to v12.
/// Adds a column for light chain storage.
pub const TO_V12: ChangeColumns = ChangeColumns {
	// number of database columns before this migration (matches TO_V11's post_columns)
	pre_columns: Some(7),
	// one new column is added for light chain storage
	post_columns: Some(8),
	// database version this migration upgrades to
	version: 12,
};

/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 13;
/// A version of database at which blooms-db was introduced.
/// Databases older than this also need the blooms migration (see `migrate`).
const BLOOMS_DB_VERSION: u32 = 13;
/// Defines how many items are migrated to the new version of database at once.
const BATCH_SIZE: usize = 1024;
/// Version file name. Stored inside the database directory; see `version_file_path`.
const VERSION_FILE_NAME: &'static str = "db_version";

/// Migration related errors.
#[derive(Debug)]
pub enum Error {
	/// Returned when current version cannot be read or guessed.
	UnknownDatabaseVersion,
	/// Existing DB is newer than the known one.
	FutureDBVersion,
	/// Migration is not possible.
	MigrationImpossible,
	/// Blooms-db migration error.
	BloomsDB(ethcore::error::Error),
	/// Migration was completed successfully,
	/// but there was a problem with io.
	Io(IoError),
}

impl Display for Error {
|
|
|
|
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
|
|
|
|
let out = match *self {
|
|
|
|
Error::UnknownDatabaseVersion => "Current database version cannot be read".into(),
|
2016-07-16 10:41:09 +02:00
|
|
|
Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(),
|
|
|
|
Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION),
|
2018-06-20 15:13:07 +02:00
|
|
|
Error::BloomsDB(ref err) => format!("blooms-db migration error: {}", err),
|
2016-07-16 10:41:09 +02:00
|
|
|
Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err),
|
2016-05-26 18:24:51 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
write!(f, "{}", out)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl From<IoError> for Error {
|
|
|
|
fn from(err: IoError) -> Self {
|
|
|
|
Error::Io(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns the version file path.
|
2016-07-11 09:46:33 +02:00
|
|
|
fn version_file_path(path: &Path) -> PathBuf {
|
|
|
|
let mut file_path = path.to_owned();
|
2016-05-26 18:24:51 +02:00
|
|
|
file_path.push(VERSION_FILE_NAME);
|
|
|
|
file_path
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Reads current database version from the file at given path.
|
2016-05-27 17:56:25 +02:00
|
|
|
/// If the file does not exist returns `DEFAULT_VERSION`.
|
2016-07-11 09:46:33 +02:00
|
|
|
fn current_version(path: &Path) -> Result<u32, Error> {
|
2018-02-22 14:53:10 +01:00
|
|
|
match fs::File::open(version_file_path(path)) {
|
2016-05-26 18:24:51 +02:00
|
|
|
Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION),
|
|
|
|
Err(_) => Err(Error::UnknownDatabaseVersion),
|
|
|
|
Ok(mut file) => {
|
|
|
|
let mut s = String::new();
|
2016-12-27 12:53:56 +01:00
|
|
|
file.read_to_string(&mut s).map_err(|_| Error::UnknownDatabaseVersion)?;
|
2016-05-26 18:24:51 +02:00
|
|
|
u32::from_str_radix(&s, 10).map_err(|_| Error::UnknownDatabaseVersion)
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Writes current database version to the file.
|
|
|
|
/// Creates a new file if the version file does not exist yet.
|
2016-07-11 09:46:33 +02:00
|
|
|
fn update_version(path: &Path) -> Result<(), Error> {
|
2016-12-27 12:53:56 +01:00
|
|
|
fs::create_dir_all(path)?;
|
2018-02-22 14:53:10 +01:00
|
|
|
let mut file = fs::File::create(version_file_path(path))?;
|
2016-12-27 12:53:56 +01:00
|
|
|
file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
|
2016-05-26 18:24:51 +02:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
/// Consolidated database path.
///
/// The consolidated database lives in the `db` subdirectory of the
/// given base path.
fn consolidated_database_path(path: &Path) -> PathBuf {
	// equivalent to cloning the base path and pushing "db"
	path.join("db")
}

/// Database backup path.
///
/// The backup is a `temp_backup` directory that is a *sibling* of the
/// database directory: the last path component is dropped and
/// `temp_backup` is appended in its place.
fn backup_database_path(path: &Path) -> PathBuf {
	let mut backup = path.to_path_buf();
	backup.pop();
	backup.push("temp_backup");
	backup
}

/// Default migration settings.
|
2016-07-28 23:46:24 +02:00
|
|
|
pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig {
|
2016-05-26 18:24:51 +02:00
|
|
|
MigrationConfig {
|
|
|
|
batch_size: BATCH_SIZE,
|
2016-07-28 23:46:24 +02:00
|
|
|
compaction_profile: *compaction_profile,
|
2016-05-26 18:24:51 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
/// Migrations on the consolidated database.
|
|
|
|
fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
|
2016-10-03 12:02:43 +02:00
|
|
|
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
|
2018-04-13 21:14:53 +02:00
|
|
|
manager.add_migration(TO_V11).map_err(|_| Error::MigrationImpossible)?;
|
|
|
|
manager.add_migration(TO_V12).map_err(|_| Error::MigrationImpossible)?;
|
2016-05-26 18:24:51 +02:00
|
|
|
Ok(manager)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Migrates database at given position with given migration rules.
///
/// Steps:
/// 1. return early if `migrations` has nothing to do for `version`;
/// 2. run the migrations, which produce the migrated data at `temp_path`;
/// 3. swap the old database out for the migrated one, keeping the old
///    copy in a sibling backup directory until the swap succeeds.
fn migrate_database(version: u32, db_path: &Path, mut migrations: MigrationManager) -> Result<(), Error> {
	// check if migration is needed
	if !migrations.is_needed(version) {
		return Ok(())
	}

	let backup_path = backup_database_path(&db_path);
	// remove the backup dir if it exists (it may have been left behind by
	// a previously interrupted migration); errors are deliberately ignored
	// because the directory usually does not exist
	let _ = fs::remove_dir_all(&backup_path);

	// migrate old database to the new one
	let temp_path = migrations.execute(&db_path, version)?;

	// completely in-place migration leads to the paths being equal.
	// in that case, no need to shuffle directories.
	if temp_path == db_path { return Ok(()) }

	// create backup: move the old database aside before installing the new one
	fs::rename(&db_path, &backup_path)?;

	// replace the old database with the new one
	if let Err(err) = fs::rename(&temp_path, &db_path) {
		// if something went wrong, bring back backup so the original
		// database is restored at its expected location
		fs::rename(&backup_path, &db_path)?;
		return Err(err.into());
	}

	// remove backup — the swap succeeded, the old copy is no longer needed
	fs::remove_dir_all(&backup_path).map_err(Into::into)
}

/// Returns true when something exists at `path`.
/// `Path::exists` is documented as `fs::metadata(path).is_ok()`, so this
/// matches the previous explicit metadata probe (symlinks are traversed).
fn exists(path: &Path) -> bool {
	path.exists()
}

/// Migrates the database.
///
/// Reads the on-disk version file, refuses databases newer than this
/// client (`FutureDBVersion`), runs the column migrations and — for
/// pre-blooms-db databases — the blooms migration, then rewrites the
/// version file to `CURRENT_VERSION`.
pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> Result<(), Error> {
	let compaction_profile = helpers::compaction_profile(&compaction_profile, path);

	// read version file.
	let version = current_version(path)?;

	// migrate the databases.
	// main db directory may already exist, so let's check if we have blocks dir
	if version > CURRENT_VERSION {
		return Err(Error::FutureDBVersion);
	}

	// We are in the latest version, yay!
	if version == CURRENT_VERSION {
		return Ok(())
	}

	let db_path = consolidated_database_path(path);

	// Further migrations — only run when the consolidated db directory
	// actually exists (a fresh node has nothing to migrate)
	if version < CURRENT_VERSION && exists(&db_path) {
		println!("Migrating database from version {} to {}", version, CURRENT_VERSION);
		migrate_database(version, &db_path, consolidated_database_migrations(&compaction_profile)?)?;

		if version < BLOOMS_DB_VERSION {
			println!("Migrating blooms to blooms-db...");
			let db_config = DatabaseConfig {
				max_open_files: 64,
				memory_budget: None,
				compaction: compaction_profile,
				// NOTE(review): `ethcore_db` is not among the imports visible
				// in this file — presumably brought into scope at crate level;
				// confirm.
				columns: ethcore_db::NUM_COLUMNS,
			};

			migrate_blooms(&db_path, &db_config).map_err(Error::BloomsDB)?;
		}

		println!("Migration finished");
	}

	// update version file so subsequent runs skip the migration.
	update_version(path)
}
