removed old migrations (#7974)

* removed old migrations

* improve SimpleMigration

* fixed migration tests

* fixed redundant whitespace

* add ToV13 migration which removes bloom groups

* bump CURRENT_VERSION of db
Marek Kotewicz 2018-02-22 14:53:10 +01:00, committed by GitHub
parent d90ab40a78
commit ee93be80c0
15 changed files with 67 additions and 870 deletions
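In brief: the pre-consolidation migrations (extras v6, state v7, blocks v8) and the v9 consolidation helpers are deleted, SimpleMigration gains a migrated_column_index() hook so a migration can target a single column, and a new ToV13 drops the obsolete bloom-group entries from the extras column. The revised trait, reproduced from the diff near the end of this commit:

/// A simple migration over key-value pairs of a single column.
pub trait SimpleMigration: 'static {
	/// Number of columns in database after the migration.
	fn columns(&self) -> Option<u32>;
	/// Version of database after the migration.
	fn version(&self) -> u32;
	/// Index of column which should be migrated.
	fn migrated_column_index(&self) -> Option<u32>;
	/// Should migrate existing object to new database.
	/// Returns `None` if the object does not exist in new version of database.
	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
}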

Cargo.lock (generated)

@@ -600,19 +600,7 @@ dependencies = [
name = "ethcore-migrations"
version = "0.1.0"
dependencies = [
"ethcore 1.9.0",
"ethcore-bloom-journal 0.1.0",
"ethcore-bytes 0.1.0",
"ethereum-types 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"journaldb 0.1.0",
"keccak-hash 0.1.0",
"kvdb 0.1.0",
"kvdb-rocksdb 0.1.0",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"macros 0.1.0",
"migration 0.1.0",
"patricia-trie 0.1.0",
"rlp 0.2.1",
]
[[package]]


@@ -4,16 +4,4 @@ version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethcore-bytes = { path = "../../util/bytes" }
ethereum-types = "0.2"
keccak-hash = { path = "../../util/hash" }
kvdb = { path = "../../util/kvdb" }
kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
log = "0.3"
macros = { path = "../../util/macros" }
migration = { path = "../../util/migration" }
rlp = { path = "../../util/rlp" }
patricia-trie = { path = "../../util/patricia_trie" }
journaldb = { path = "../../util/journaldb" }
ethcore-bloom-journal = { path = "../../util/bloom" }
ethcore = { path = ".." }


@@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Blocks database migrations.
mod v8;
pub use self::v8::V8;


@@ -1,37 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! This migration compresses the state db.
use migration::{SimpleMigration, Progress};
use rlp::{Compressible, UntrustedRlp, RlpType};
/// Compressing migration.
#[derive(Default)]
pub struct V8(Progress);
impl SimpleMigration for V8 {
fn version(&self) -> u32 {
8
}
fn columns(&self) -> Option<u32> { None }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
self.0.tick();
Some((key,UntrustedRlp::new(&value).compress(RlpType::Blocks).into_vec()))
}
}


@@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Extras database migrations.
mod v6;
pub use self::v6::ToV6;


@@ -1,102 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use migration::SimpleMigration;
/// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front.
pub struct ToV6;
impl ToV6 {
fn migrate_old_key(&self, old_key: Vec<u8>, index: u8, len: usize) -> Vec<u8> {
let mut result = vec![];
result.reserve(len);
unsafe {
result.set_len(len);
}
result[0] = index;
let old_key_start = 33 - len;
result[1..].clone_from_slice(&old_key[old_key_start..32]);
result
}
}
impl SimpleMigration for ToV6 {
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 6 }
fn simple_migrate(&mut self, mut key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
// at this version all extras keys are 33 bytes long.
if key.len() == 33 {
// block details key changes:
// - index is moved to the front
if key[32] == 0 {
return Some((self.migrate_old_key(key, 0, 33), value));
}
// block hash key changes:
// - key is shorter 33 -> 5 bytes
// - index is moved to the front
if key[32] == 1 {
return Some((self.migrate_old_key(key, 1, 5), value));
}
// transaction addresses changes:
// - index is moved to the front
if key[32] == 2 {
return Some((self.migrate_old_key(key, 2, 33), value));
}
// block log blooms are removed
if key[32] == 3 {
return None;
}
// blocks blooms key changes:
// - key is shorter 33 -> 6 bytes
// - index is moved to the front
// - index is changed 4 -> 3
if key[32] == 4 {
key.reverse();
// i have no idea why it was reversed
let reverse = key;
let result = vec![
// new extras index is 3
3,
// 9th (+ prefix) byte was the level. Now it's second.
reverse[9],
reverse[4],
reverse[3],
reverse[2],
reverse[1],
];
return Some((result, value));
}
// blocks receipts key changes:
// - index is moved to the front
// - index is changed 5 -> 4
if key[32] == 5 {
return Some((self.migrate_old_key(key, 4, 33), value));
}
}
Some((key, value))
}
}


@@ -16,34 +16,9 @@
//! Database migrations.
#[macro_use]
extern crate log;
#[macro_use]
extern crate macros;
extern crate migration;
extern crate rlp;
extern crate ethereum_types;
extern crate ethcore_bytes as bytes;
extern crate kvdb;
extern crate kvdb_rocksdb;
extern crate keccak_hash as hash;
extern crate journaldb;
extern crate ethcore_bloom_journal as bloom_journal;
extern crate ethcore;
extern crate patricia_trie as trie;
use migration::ChangeColumns;
pub mod state;
pub mod blocks;
pub mod extras;
mod v9;
pub use self::v9::ToV9;
pub use self::v9::Extract;
mod v10;
pub use self::v10::ToV10;
use migration::{ChangeColumns, SimpleMigration};
/// The migration from v10 to v11.
/// Adds a column for node info.
@@ -60,3 +35,30 @@ pub const TO_V12: ChangeColumns = ChangeColumns {
post_columns: Some(8),
version: 12,
};
#[derive(Default)]
pub struct ToV13;
impl SimpleMigration for ToV13 {
fn columns(&self) -> Option<u32> {
Some(8)
}
fn version(&self) -> u32 {
13
}
fn migrated_column_index(&self) -> Option<u32> {
// extras!
Some(3)
}
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
// remove all bloom groups
if key[0] == 3 {
None
} else {
Some((key, value))
}
}
}
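Illustratively (a hypothetical snippet, not part of the commit): bloom groups live under extras index 3, so ToV13 drops any key with that prefix byte and passes every other entry through:

// hypothetical check of ToV13::simple_migrate behaviour
let mut m = ToV13::default();
// a bloom-group entry (prefix byte 3) is removed from the new DB...
assert_eq!(m.simple_migrate(vec![3, 0xde, 0xad], vec![0x01]), None);
// ...while any other extras entry passes through unchanged.
assert_eq!(
	m.simple_migrate(vec![0, 0xbe, 0xef], vec![0x02]),
	Some((vec![0, 0xbe, 0xef], vec![0x02])),
);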


@@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! State database migrations.
mod v7;
pub use self::v7::{ArchiveV7, OverlayRecentV7};


@@ -1,263 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! This migration migrates the state db to use an accountdb which ensures uniqueness
//! using an address' hash as opposed to the address itself.
use std::collections::HashMap;
use ethereum_types::{H256, Address};
use bytes::Bytes;
use kvdb_rocksdb::Database;
use migration::{Batch, Config, Error, ErrorKind, Migration, SimpleMigration, Progress};
use hash::keccak;
use std::sync::Arc;
use rlp::{decode, Rlp, RlpStream};
// attempt to migrate a key, value pair. None if migration not possible.
fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option<H256> {
let val_hash = keccak(val);
if key_h != val_hash {
// this is a key which has been xor'd with an address.
// recover the address.
let address = key_h ^ val_hash;
// check that the address is actually a 20-byte value.
// the leftmost 12 bytes should be zero.
if &address[0..12] != &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] {
return None;
}
let address_hash = keccak(Address::from(address));
// create the xor'd key in place.
key_h.copy_from_slice(&*val_hash);
assert_eq!(key_h, val_hash);
{
let last_src: &[u8] = &*address_hash;
let last_dst: &mut [u8] = &mut *key_h;
for (k, a) in last_dst[12..].iter_mut().zip(&last_src[12..]) {
*k ^= *a;
}
}
Some(key_h)
} else {
None
}
}
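// --- illustrative sketch (editor's addition, not part of the original file) ---
// The recovery above relies on the v6 key scheme: an old state key was
// keccak(value) XOR'd with the zero-padded 20-byte address, so XOR'ing
// with the value hash again exposes the address. A self-contained
// demonstration with plain byte arrays standing in for H256:
fn xor32(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
	let mut out = [0u8; 32];
	for i in 0..32 {
		out[i] = a[i] ^ b[i];
	}
	out
}

fn recover_address(old_key: &[u8; 32], val_hash: &[u8; 32]) -> Option<[u8; 20]> {
	let mixed = xor32(old_key, val_hash);
	// a genuine address leaves the leftmost 12 bytes zero
	if mixed[..12].iter().any(|&b| b != 0) {
		return None;
	}
	let mut addr = [0u8; 20];
	addr.copy_from_slice(&mixed[12..]);
	Some(addr)
}
// --- end sketch ---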
/// Version for `ArchiveDB`.
#[derive(Default)]
pub struct ArchiveV7(Progress);
impl SimpleMigration for ArchiveV7 {
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 7 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
self.0.tick();
if key.len() != 32 {
// metadata key, ignore.
return Some((key, value));
}
let key_h = H256::from_slice(&key[..]);
if let Some(new_key) = attempt_migrate(key_h, &value[..]) {
Some((new_key[..].to_owned(), value))
} else {
Some((key, value))
}
}
}
// magic numbers and constants for overlay-recent at v6.
// re-written here because it may change in the journaldb module.
const V7_LATEST_ERA_KEY: &'static [u8] = &[ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
const V7_VERSION_KEY: &'static [u8] = &[ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
const DB_VERSION: u32 = 0x203;
const PADDING : [u8; 10] = [0u8; 10];
/// Version for `OverlayRecent` database.
/// more involved than the archive version because of journaling.
#[derive(Default)]
pub struct OverlayRecentV7 {
migrated_keys: HashMap<H256, H256>,
}
impl OverlayRecentV7 {
// walk all journal entries in the database backwards.
// find migrations for any possible inserted keys.
fn walk_journal(&mut self, source: Arc<Database>) -> Result<(), Error> {
if let Some(val) = source.get(None, V7_LATEST_ERA_KEY)? {
let mut era = decode::<u64>(&val);
loop {
let mut index: usize = 0;
loop {
let entry_key = {
let mut r = RlpStream::new_list(3);
r.append(&era).append(&index).append(&&PADDING[..]);
r.out()
};
if let Some(journal_raw) = source.get(None, &entry_key)? {
let rlp = Rlp::new(&journal_raw);
// migrate all inserted keys.
for r in rlp.at(1).iter() {
let key: H256 = r.val_at(0);
let v: Bytes = r.val_at(1);
if self.migrated_keys.get(&key).is_none() {
if let Some(new_key) = attempt_migrate(key, &v) {
self.migrated_keys.insert(key, new_key);
}
}
}
index += 1;
} else {
break;
}
}
if index == 0 || era == 0 {
break;
}
era -= 1;
}
}
Ok(())
}
// walk all journal entries in the database backwards.
// replace all possible inserted/deleted keys with their migrated counterparts
// and commit the altered entries.
fn migrate_journal(&self, source: Arc<Database>, mut batch: Batch, dest: &mut Database) -> Result<(), Error> {
if let Some(val) = source.get(None, V7_LATEST_ERA_KEY)? {
batch.insert(V7_LATEST_ERA_KEY.into(), val.clone().into_vec(), dest)?;
let mut era = decode::<u64>(&val);
loop {
let mut index: usize = 0;
loop {
let entry_key = {
let mut r = RlpStream::new_list(3);
r.append(&era).append(&index).append(&&PADDING[..]);
r.out()
};
if let Some(journal_raw) = source.get(None, &entry_key)? {
let rlp = Rlp::new(&journal_raw);
let id: H256 = rlp.val_at(0);
let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new();
// migrate all inserted keys.
for r in rlp.at(1).iter() {
let mut key: H256 = r.val_at(0);
let v: Bytes = r.val_at(1);
if let Some(new_key) = self.migrated_keys.get(&key) {
key = *new_key;
}
inserted_keys.push((key, v));
}
// migrate all deleted keys.
let mut deleted_keys: Vec<H256> = rlp.list_at(2);
for old_key in &mut deleted_keys {
if let Some(new) = self.migrated_keys.get(&*old_key) {
*old_key = new.clone();
}
}
// rebuild the journal entry rlp.
let mut stream = RlpStream::new_list(3);
stream.append(&id);
stream.begin_list(inserted_keys.len());
for (k, v) in inserted_keys {
stream.begin_list(2).append(&k).append(&v);
}
stream.append_list(&deleted_keys);
// and insert it into the new database.
batch.insert(entry_key, stream.out(), dest)?;
index += 1;
} else {
break;
}
}
if index == 0 || era == 0 {
break;
}
era -= 1;
}
}
batch.commit(dest)
}
}
impl Migration for OverlayRecentV7 {
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 7 }
// walk all records in the database, attempting to migrate any possible and
// keeping records of those that we do. then migrate the journal using
// this information.
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, col);
// check version metadata.
match source.get(None, V7_VERSION_KEY)? {
Some(ref version) if decode::<u32>(&*version) == DB_VERSION => {}
_ => return Err(ErrorKind::MigrationImpossible.into()), // missing or wrong version
}
let mut count = 0;
for (key, value) in source.iter(None).into_iter().flat_map(|inner| inner) {
count += 1;
if count == 100_000 {
count = 0;
flush!(".");
}
let mut key = key.into_vec();
if key.len() == 32 {
let key_h = H256::from_slice(&key[..]);
if let Some(new_key) = attempt_migrate(key_h.clone(), &value) {
self.migrated_keys.insert(key_h, new_key);
key.copy_from_slice(&new_key[..]);
}
}
batch.insert(key, value.into_vec(), dest)?;
}
self.walk_journal(source.clone())?;
self.migrate_journal(source, batch, dest)
}
}


@@ -1,119 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Bloom upgrade
use std::sync::Arc;
use ethcore::db::{COL_EXTRA, COL_HEADERS, COL_STATE};
use ethcore::state_db::{ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET, StateDB};
use trie::TrieDB;
use ethcore::views::HeaderView;
use bloom_journal::Bloom;
use migration::{Error, Migration, Progress, Batch, Config, ErrorKind};
use journaldb;
use ethereum_types::H256;
use trie::Trie;
use kvdb::{DBTransaction, ResultExt};
use kvdb_rocksdb::Database;
/// Account bloom upgrade routine. If bloom already present, does nothing.
/// If database empty (no best block), does nothing.
/// Can be called on upgraded database with no issues (will do nothing).
pub fn generate_bloom(source: Arc<Database>, dest: &mut Database) -> Result<(), Error> {
trace!(target: "migration", "Account bloom upgrade started");
let best_block_hash = match source.get(COL_EXTRA, b"best")? {
// no migration needed
None => {
trace!(target: "migration", "No best block hash, skipping");
return Ok(());
},
Some(hash) => hash,
};
let best_block_header = match source.get(COL_HEADERS, &best_block_hash)? {
// no best block, nothing to do
None => {
trace!(target: "migration", "No best block header, skipping");
return Ok(())
},
Some(x) => x,
};
let state_root = HeaderView::new(&best_block_header).state_root();
trace!("Adding accounts bloom (one-time upgrade)");
let bloom_journal = {
let mut bloom = Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET);
// no difference what algorithm is passed, since there will be no writes
let state_db = journaldb::new(
source.clone(),
journaldb::Algorithm::OverlayRecent,
COL_STATE);
let account_trie = TrieDB::new(state_db.as_hashdb(), &state_root).chain_err(|| "Cannot open trie")?;
for item in account_trie.iter().map_err(|_| ErrorKind::MigrationImpossible)? {
let (ref account_key, _) = item.map_err(|_| ErrorKind::MigrationImpossible)?;
let account_key_hash = H256::from_slice(account_key);
bloom.set(&*account_key_hash);
}
bloom.drain_journal()
};
trace!(target: "migration", "Generated {} bloom updates", bloom_journal.entries.len());
let mut batch = DBTransaction::new();
StateDB::commit_bloom(&mut batch, bloom_journal).chain_err(|| "Failed to commit bloom")?;
dest.write(batch)?;
trace!(target: "migration", "Finished bloom update");
Ok(())
}
/// Account bloom migration.
#[derive(Default)]
pub struct ToV10 {
progress: Progress,
}
impl ToV10 {
/// New v10 migration
pub fn new() -> ToV10 { ToV10 { progress: Progress::default() } }
}
impl Migration for ToV10 {
fn version(&self) -> u32 {
10
}
fn pre_columns(&self) -> Option<u32> { Some(5) }
fn columns(&self) -> Option<u32> { Some(6) }
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, col);
for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) {
self.progress.tick();
batch.insert(key.into_vec(), value.into_vec(), dest)?;
}
batch.commit(dest)?;
if col == COL_STATE {
generate_bloom(source, dest)?;
}
Ok(())
}
}


@@ -1,82 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! This migration consolidates all databases into single one using Column Families.
use rlp::{Rlp, RlpStream};
use kvdb_rocksdb::Database;
use migration::{Batch, Config, Error, Migration, Progress};
use std::sync::Arc;
/// Which part of block to preserve
pub enum Extract {
/// Extract block header RLP.
Header,
/// Extract block body RLP.
Body,
/// Don't change the value.
All,
}
/// Consolidation of extras/block/state databases into single one.
pub struct ToV9 {
progress: Progress,
column: Option<u32>,
extract: Extract,
}
impl ToV9 {
/// Creates new V9 migration and assigns all `(key,value)` pairs from `source` DB to given Column Family
pub fn new(column: Option<u32>, extract: Extract) -> Self {
ToV9 {
progress: Progress::default(),
column: column,
extract: extract,
}
}
}
impl Migration for ToV9 {
fn columns(&self) -> Option<u32> { Some(5) }
fn version(&self) -> u32 { 9 }
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, self.column);
for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) {
self.progress.tick();
match self.extract {
Extract::Header => {
batch.insert(key.into_vec(), Rlp::new(&value).at(0).as_raw().to_vec(), dest)?
},
Extract::Body => {
let mut body = RlpStream::new_list(2);
let block_rlp = Rlp::new(&value);
body.append_raw(block_rlp.at(1).as_raw(), 1);
body.append_raw(block_rlp.at(2).as_raw(), 1);
batch.insert(key.into_vec(), body.out(), dest)?
},
Extract::All => {
batch.insert(key.into_vec(), value.into_vec(), dest)?
}
}
}
batch.commit(dest)
}
}


@@ -275,7 +275,7 @@ pub fn execute_upgrades(
}
let client_path = dirs.db_path(pruning);
migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
migrate(&client_path, compaction_profile).map_err(|e| format!("{}", e))
}
/// Prompts user asking for password.


@@ -15,22 +15,17 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fs;
use std::fs::File;
use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError};
use std::sync::Arc;
use journaldb::Algorithm;
use migr::{self, Manager as MigrationManager, Config as MigrationConfig, Migration};
use kvdb;
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
use migrations::{self, Extract};
use ethcore::db;
use migr::{self, Manager as MigrationManager, Config as MigrationConfig};
use kvdb_rocksdb::CompactionProfile;
use migrations;
/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 12;
const CURRENT_VERSION: u32 = 13;
/// First version of the consolidated database.
const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of database at once.
@@ -43,14 +38,10 @@ const VERSION_FILE_NAME: &'static str = "db_version";
pub enum Error {
/// Returned when current version cannot be read or guessed.
UnknownDatabaseVersion,
/// Migration does not support existing pruning algorithm.
UnsupportedPruningMethod,
/// Existing DB is newer than the known one.
FutureDBVersion,
/// Migration is not possible.
MigrationImpossible,
/// Migration unexpectedly failed.
MigrationFailed,
/// Internal migration error.
Internal(migr::Error),
/// Migration was completed successfully,
@@ -62,10 +53,8 @@ impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
let out = match *self {
Error::UnknownDatabaseVersion => "Current database version cannot be read".into(),
Error::UnsupportedPruningMethod => "Unsupported pruning method for database migration. Delete DB and resync.".into(),
Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(),
Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION),
Error::MigrationFailed => "Database migration unexpectedly failed".into(),
Error::Internal(ref err) => format!("{}", err),
Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err),
};
@@ -99,7 +88,7 @@ fn version_file_path(path: &Path) -> PathBuf {
/// Reads current database version from the file at given path.
/// If the file does not exist returns `DEFAULT_VERSION`.
fn current_version(path: &Path) -> Result<u32, Error> {
match File::open(version_file_path(path)) {
match fs::File::open(version_file_path(path)) {
Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION),
Err(_) => Err(Error::UnknownDatabaseVersion),
Ok(mut file) => {
@@ -114,7 +103,7 @@ fn current_version(path: &Path) -> Result<u32, Error> {
/// Creates a new file if the version file does not exist yet.
fn update_version(path: &Path) -> Result<(), Error> {
fs::create_dir_all(path)?;
let mut file = File::create(version_file_path(path))?;
let mut file = fs::File::create(version_file_path(path))?;
file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
Ok(())
}
@@ -145,49 +134,12 @@ pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig {
/// Migrations on the consolidated database.
fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(migrations::ToV13::default()).map_err(|_| Error::MigrationImpossible)?;
Ok(manager)
}
/// Consolidates legacy databases into single one.
fn consolidate_database(
old_db_path: PathBuf,
new_db_path: PathBuf,
column: Option<u32>,
extract: Extract,
compaction_profile: &CompactionProfile) -> Result<(), Error> {
fn db_error(e: kvdb::Error) -> Error {
warn!("Cannot open Database for consolidation: {:?}", e);
Error::MigrationFailed
}
let mut migration = migrations::ToV9::new(column, extract);
let config = default_migration_settings(compaction_profile);
let mut db_config = DatabaseConfig {
max_open_files: 64,
memory_budget: None,
compaction: config.compaction_profile,
columns: None,
wal: true,
};
let old_path_str = old_db_path.to_str().ok_or(Error::MigrationImpossible)?;
let new_path_str = new_db_path.to_str().ok_or(Error::MigrationImpossible)?;
let cur_db = Arc::new(Database::open(&db_config, old_path_str).map_err(db_error)?);
// open new DB with proper number of columns
db_config.columns = migration.columns();
let mut new_db = Database::open(&db_config, new_path_str).map_err(db_error)?;
// Migrate to new database (default column only)
migration.migrate(cur_db, &config, &mut new_db, None)?;
Ok(())
}
/// Migrates database at given position with given migration rules.
fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> {
// check if migration is needed
@@ -225,7 +177,7 @@ fn exists(path: &Path) -> bool {
}
/// Migrates the database.
pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> {
pub fn migrate(path: &Path, compaction_profile: CompactionProfile) -> Result<(), Error> {
// read version file.
let version = current_version(path)?;
@@ -240,32 +192,6 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> {
return Ok(())
}
// Perform pre-consolidation migrations
if version < CONSOLIDATION_VERSION && exists(&legacy::blocks_database_path(path)) {
println!("Migrating database from version {} to {}", version, CONSOLIDATION_VERSION);
migrate_database(version, legacy::extras_database_path(path), legacy::extras_database_migrations(&compaction_profile)?)?;
migrate_database(version, legacy::state_database_path(path), legacy::state_database_migrations(pruning, &compaction_profile)?)?;
migrate_database(version, legacy::blocks_database_path(path), legacy::blocks_database_migrations(&compaction_profile)?)?;
let db_path = consolidated_database_path(path);
// Remove the database dir (it shouldn't exist anyway, but it might when migration was interrupted)
let _ = fs::remove_dir_all(db_path.clone());
consolidate_database(legacy::blocks_database_path(path), db_path.clone(), db::COL_HEADERS, Extract::Header, &compaction_profile)?;
consolidate_database(legacy::blocks_database_path(path), db_path.clone(), db::COL_BODIES, Extract::Body, &compaction_profile)?;
consolidate_database(legacy::extras_database_path(path), db_path.clone(), db::COL_EXTRA, Extract::All, &compaction_profile)?;
consolidate_database(legacy::state_database_path(path), db_path.clone(), db::COL_STATE, Extract::All, &compaction_profile)?;
consolidate_database(legacy::trace_database_path(path), db_path.clone(), db::COL_TRACE, Extract::All, &compaction_profile)?;
let _ = fs::remove_dir_all(legacy::blocks_database_path(path));
let _ = fs::remove_dir_all(legacy::extras_database_path(path));
let _ = fs::remove_dir_all(legacy::state_database_path(path));
let _ = fs::remove_dir_all(legacy::trace_database_path(path));
println!("Migration finished");
}
// update version so we can apply post-consolidation migrations.
let version = ::std::cmp::max(CONSOLIDATION_VERSION, version);
// Further migrations
if version >= CONSOLIDATION_VERSION && version < CURRENT_VERSION && exists(&consolidated_database_path(path)) {
println!("Migrating database from version {} to {}", ::std::cmp::max(CONSOLIDATION_VERSION, version), CURRENT_VERSION);
@@ -276,67 +202,3 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> {
// update version file.
update_version(path)
}
/// Old migrations utilities
mod legacy {
use super::*;
use std::path::{Path, PathBuf};
use migr::{Manager as MigrationManager};
use kvdb_rocksdb::CompactionProfile;
use migrations;
/// Blocks database path.
pub fn blocks_database_path(path: &Path) -> PathBuf {
let mut blocks_path = path.to_owned();
blocks_path.push("blocks");
blocks_path
}
/// Extras database path.
pub fn extras_database_path(path: &Path) -> PathBuf {
let mut extras_path = path.to_owned();
extras_path.push("extras");
extras_path
}
/// State database path.
pub fn state_database_path(path: &Path) -> PathBuf {
let mut state_path = path.to_owned();
state_path.push("state");
state_path
}
/// Trace database path.
pub fn trace_database_path(path: &Path) -> PathBuf {
let mut blocks_path = path.to_owned();
blocks_path.push("tracedb");
blocks_path
}
/// Migrations on the blocks database.
pub fn blocks_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible)?;
Ok(manager)
}
/// Migrations on the extras database.
pub fn extras_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible)?;
Ok(manager)
}
/// Migrations on the state database.
pub fn state_database_migrations(pruning: Algorithm, compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
let res = match pruning {
Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
_ => return Err(Error::UnsupportedPruningMethod),
};
res.map_err(|_| Error::MigrationImpossible)?;
Ok(manager)
}
}


@@ -130,12 +130,14 @@ pub trait Migration: 'static {
fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: Option<u32>) -> Result<()>;
}
/// A simple migration over key-value pairs.
/// A simple migration over key-value pairs of a single column.
pub trait SimpleMigration: 'static {
/// Number of columns in database after the migration.
fn columns(&self) -> Option<u32>;
/// Version of database after the migration.
fn version(&self) -> u32;
/// Index of column which should be migrated.
fn migrated_column_index(&self) -> Option<u32>;
/// Should migrate existing object to new database.
/// Returns `None` if the object does not exist in new version of database.
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
@@ -149,6 +151,7 @@ impl<T: SimpleMigration> Migration for T {
fn alters_existing(&self) -> bool { true }
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<()> {
let migration_needed = col == SimpleMigration::migrated_column_index(self);
let mut batch = Batch::new(config, col);
let iter = match source.iter(col) {
@@ -157,9 +160,13 @@ impl<T: SimpleMigration> Migration for T {
};
for (key, value) in iter {
if migration_needed {
if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec()) {
batch.insert(key, value, dest)?;
}
} else {
batch.insert(key.into_vec(), value.into_vec(), dest)?;
}
}
batch.commit(dest)
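With this dispatch in place, only the column named by migrated_column_index() is fed through simple_migrate; every other column is copied verbatim. A hypothetical column-targeted migration under the revised trait (name, version number, and key transform are illustrative, not from the commit):

// hypothetical: rewrite keys in column 2 only, copy all other columns
struct BumpPrefix;

impl SimpleMigration for BumpPrefix {
	fn columns(&self) -> Option<u32> { Some(8) }
	fn version(&self) -> u32 { 14 }
	fn migrated_column_index(&self) -> Option<u32> { Some(2) }
	fn simple_migrate(&mut self, mut key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
		key.insert(0, 0x01); // illustrative key transform
		Some((key, value))
	}
}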


@@ -63,9 +63,17 @@ fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
struct Migration0;
impl SimpleMigration for Migration0 {
fn columns(&self) -> Option<u32> { None }
fn columns(&self) -> Option<u32> {
None
}
fn version(&self) -> u32 { 1 }
fn version(&self) -> u32 {
1
}
fn migrated_column_index(&self) -> Option<u32> {
None
}
fn simple_migrate(&mut self, mut key: Vec<u8>, mut value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
key.push(0x11);
@@ -78,9 +86,17 @@ impl SimpleMigration for Migration1 {
struct Migration1;
impl SimpleMigration for Migration1 {
fn columns(&self) -> Option<u32> { None }
fn columns(&self) -> Option<u32> {
None
}
fn version(&self) -> u32 { 2 }
fn version(&self) -> u32 {
2
}
fn migrated_column_index(&self) -> Option<u32> {
None
}
fn simple_migrate(&mut self, key: Vec<u8>, _value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
Some((key, vec![]))