// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of OpenEthereum.
// OpenEthereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// OpenEthereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
use super::{
    kvdb_rocksdb::{CompactionProfile, DatabaseConfig},
    migration_rocksdb::{ChangeColumns, Config as MigrationConfig, Manager as MigrationManager},
};
use ethcore::{self, client::DatabaseCompactionProfile};
use std::{
    fmt::{Display, Error as FmtError, Formatter},
    fs,
    io::{Error as IoError, ErrorKind, Read, Write},
    path::{Path, PathBuf},
};

use super::{blooms::migrate_blooms, helpers};
/// The migration from v10 to v11.
/// Adds a column for node info.
pub const TO_V11: ChangeColumns = ChangeColumns {
    pre_columns: Some(6),
    post_columns: Some(7),
    version: 11,
};
/// The migration from v11 to v12.
/// Adds a column for light chain storage.
pub const TO_V12: ChangeColumns = ChangeColumns {
    pre_columns: Some(7),
    post_columns: Some(8),
    version: 12,
};
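
// Illustration only (not an actual migration in this file): a further
// column-adding migration would be declared with the same `ChangeColumns`
// shape and then registered in `consolidated_database_migrations` below.
// The name, column counts, and target version here are hypothetical.
//
//     pub const TO_V13_EXAMPLE: ChangeColumns = ChangeColumns {
//         pre_columns: Some(8),
//         post_columns: Some(9),
//         version: 13,
//     };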
/// Database is assumed to be at the default version when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 16;
/// Up to and including this version, the external upgrade tool must be used.
const USE_MIGRATION_TOOL: u32 = 15;
/// The database version at which blooms-db was introduced.
const BLOOMS_DB_VERSION: u32 = 13;
/// Defines how many items are migrated to the new version of the database at once.
const BATCH_SIZE: usize = 1024;
/// Version file name.
const VERSION_FILE_NAME: &'static str = "db_version";
/// Migration related errors.
#[derive(Debug)]
pub enum Error {
    /// Returned when the current version cannot be read or guessed.
    UnknownDatabaseVersion,
    /// Existing DB is newer than the known one.
    FutureDBVersion,
    /// Migration is not possible.
    MigrationImpossible,
    /// For old versions use the external migration tool.
    UseMigrationTool,
    /// Blooms-db migration error.
    BloomsDB(ethcore::error::Error),
    /// Migration was completed successfully,
    /// but there was a problem with io.
    Io(IoError),
}
impl Display for Error {
    fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
        let out = match *self {
            Error::UnknownDatabaseVersion => "Current database version cannot be read".into(),
            Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(),
            Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION),
            Error::BloomsDB(ref err) => format!("blooms-db migration error: {}", err),
            Error::UseMigrationTool => "For db versions 15 and lower (v2.5.13=>13, 2.7.2=>14, v3.0.1=>15) please use upgrade db tool to manually upgrade db: https://github.com/openethereum/3.1-db-upgrade-tool".into(),
            Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err),
        };

        write!(f, "{}", out)
    }
}
impl From<IoError> for Error {
    fn from(err: IoError) -> Self {
        Error::Io(err)
    }
}
/// Returns the version file path.
fn version_file_path(path: &Path) -> PathBuf {
    let mut file_path = path.to_owned();
    file_path.push(VERSION_FILE_NAME);
    file_path
}
/// Reads current database version from the file at given path.
/// If the file does not exist returns `DEFAULT_VERSION`.
fn current_version(path: &Path) -> Result<u32, Error> {
    match fs::File::open(version_file_path(path)) {
        Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION),
        Err(_) => Err(Error::UnknownDatabaseVersion),
        Ok(mut file) => {
            let mut s = String::new();
            file.read_to_string(&mut s)
                .map_err(|_| Error::UnknownDatabaseVersion)?;
            u32::from_str_radix(&s, 10).map_err(|_| Error::UnknownDatabaseVersion)
        }
    }
}
/// Writes current database version to the file.
/// Creates a new file if the version file does not exist yet.
fn update_version(path: &Path) -> Result<(), Error> {
    fs::create_dir_all(path)?;
    let mut file = fs::File::create(version_file_path(path))?;
    file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
    Ok(())
}
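
// A minimal round-trip sketch for the version-file helpers above. It is not
// part of the original module; it assumes writing under the system temp dir
// is acceptable, and the directory name below is purely illustrative.
#[cfg(test)]
mod version_file_round_trip {
    use super::*;

    #[test]
    fn reads_default_then_written_version() {
        let mut dir = std::env::temp_dir();
        dir.push("oe_migration_version_round_trip");
        let _ = std::fs::remove_dir_all(&dir);
        std::fs::create_dir_all(&dir).unwrap();

        // With no `db_version` file present, the default version is assumed.
        assert_eq!(current_version(&dir).unwrap(), DEFAULT_VERSION);

        // After `update_version`, the current version should be read back.
        update_version(&dir).unwrap();
        assert_eq!(current_version(&dir).unwrap(), CURRENT_VERSION);

        let _ = std::fs::remove_dir_all(&dir);
    }
}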
/// Consolidated database path
fn consolidated_database_path(path: &Path) -> PathBuf {
    let mut state_path = path.to_owned();
    state_path.push("db");
    state_path
}
/// Database backup
fn backup_database_path(path: &Path) -> PathBuf {
    let mut backup_path = path.to_owned();
    backup_path.pop();
    backup_path.push("temp_backup");
    backup_path
}
/// Default migration settings.
pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig {
    MigrationConfig {
        batch_size: BATCH_SIZE,
        compaction_profile: *compaction_profile,
    }
}
/// Migrations on the consolidated database.
fn consolidated_database_migrations(
    compaction_profile: &CompactionProfile,
) -> Result<MigrationManager, Error> {
    let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
    manager
        .add_migration(TO_V11)
        .map_err(|_| Error::MigrationImpossible)?;
    manager
        .add_migration(TO_V12)
        .map_err(|_| Error::MigrationImpossible)?;
    Ok(manager)
}
/// Migrates database at given position with given migration rules.
fn migrate_database(
    version: u32,
    db_path: &Path,
    mut migrations: MigrationManager,
) -> Result<(), Error> {
    // check if migration is needed
    if !migrations.is_needed(version) {
        return Ok(());
    }

    let backup_path = backup_database_path(&db_path);
    // remove the backup dir if it exists
    let _ = fs::remove_dir_all(&backup_path);

    // migrate old database to the new one
    let temp_path = migrations.execute(&db_path, version)?;

    // completely in-place migration leads to the paths being equal.
    // in that case, no need to shuffle directories.
    if temp_path == db_path {
        return Ok(());
    }

    // create backup
    fs::rename(&db_path, &backup_path)?;

    // replace the old database with the new one
    if let Err(err) = fs::rename(&temp_path, &db_path) {
        // if something went wrong, bring back backup
        fs::rename(&backup_path, &db_path)?;
        return Err(err.into());
    }

    // remove backup
    fs::remove_dir_all(&backup_path).map_err(Into::into)
}
fn exists(path: &Path) -> bool {
    fs::metadata(path).is_ok()
}
/// Migrates the database.
pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> Result<(), Error> {
    let compaction_profile = helpers::compaction_profile(&compaction_profile, path);

    // read version file.
    let version = current_version(path)?;

    // migrate the databases.
    // main db directory may already exist, so let's check if we have a blocks dir
    if version > CURRENT_VERSION {
        return Err(Error::FutureDBVersion);
    }

    // We are in the latest version, yay!
    if version == CURRENT_VERSION {
        return Ok(());
    }

    if version != DEFAULT_VERSION && version <= USE_MIGRATION_TOOL {
        return Err(Error::UseMigrationTool);
    }

    let db_path = consolidated_database_path(path);

    // Further migrations
    if version < CURRENT_VERSION && exists(&db_path) {
        println!(
            "Migrating database from version {} to {}",
            version, CURRENT_VERSION
        );
        migrate_database(
            version,
            &db_path,
            consolidated_database_migrations(&compaction_profile)?,
        )?;

        if version < BLOOMS_DB_VERSION {
            println!("Migrating blooms to blooms-db...");
            let db_config = DatabaseConfig {
                max_open_files: 64,
                memory_budget: None,
                compaction: compaction_profile,
                columns: ethcore_db::NUM_COLUMNS,
            };

            migrate_blooms(&db_path, &db_config).map_err(Error::BloomsDB)?;
        }

        println!("Migration finished");
    }

    // update version file.
    update_version(path)
}
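
// A hypothetical call-site sketch: the real caller lives in the client startup
// code, and the path handling and compaction profile choice below are
// assumptions, not this crate's actual wiring.
//
//     use std::path::Path;
//
//     fn prepare_db(base_path: &Path) -> Result<(), Error> {
//         // Bring the on-disk schema up to CURRENT_VERSION (or fail with
//         // FutureDBVersion / UseMigrationTool) before opening the database.
//         migrate(base_path, &DatabaseCompactionProfile::default())?;
//         Ok(())
//     }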