From 171e93752c51a7f5e13177e7a543fda889868e5a Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Fri, 8 Jul 2016 16:07:06 +0200
Subject: [PATCH] overlay recent v7 migration

---
 ethcore/src/migrations/state/mod.rs |   2 +-
 ethcore/src/migrations/state/v7.rs  | 194 +++++++++++++++++++++++-----
 parity/migration.rs                 |  11 +-
 3 files changed, 173 insertions(+), 34 deletions(-)

diff --git a/ethcore/src/migrations/state/mod.rs b/ethcore/src/migrations/state/mod.rs
index bd4454e4f..9a6b9e086 100644
--- a/ethcore/src/migrations/state/mod.rs
+++ b/ethcore/src/migrations/state/mod.rs
@@ -18,4 +18,4 @@
 
 mod v7;
 
-pub use self::v7::ArchiveV7;
\ No newline at end of file
+pub use self::v7::{ArchiveV7, OverlayRecentV7};
\ No newline at end of file
diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs
index 0ff880cfa..b5eeb77a5 100644
--- a/ethcore/src/migrations/state/v7.rs
+++ b/ethcore/src/migrations/state/v7.rs
@@ -1,9 +1,63 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity.  If not, see <http://www.gnu.org/licenses/>.
+
+//! This migration migrates the state db to use an accountdb which ensures uniqueness
+//! using an address' hash as opposed to the address itself.
+
+use std::collections::HashMap;
+
+use util::Bytes;
 use util::hash::{Address, FixedHash, H256};
-use util::migration::SimpleMigration;
+use util::kvdb::Database;
+use util::migration::{Batch, Config, Error, Migration, SimpleMigration};
+use util::rlp::{decode, Rlp, RlpStream, Stream, View};
 use util::sha3::Hashable;
 
-/// This migration migrates the state db to use an accountdb which ensures uniqueness
-/// using an address' hash as opposed to the address itself. Works only for ArchiveDB.
+// attempt to migrate a key, value pair. Err if migration not possible.
+fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Result<H256, H256> {
+	let val_hash = val.sha3();
+
+	if key_h != val_hash {
+		// this is a key which has been xor'd with an address.
+		// recover the address.
+		let address = key_h ^ val_hash;
+
+		// check that the address is actually a 20-byte value.
+		// the leftmost 12 bytes should be zero.
+		if &address[0..12] != &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] {
+			return Err(key_h);
+		}
+
+		let address_hash = Address::from(address).sha3();
+
+		// create the xor'd key in place.
+		key_h.copy_from_slice(&*val_hash);
+		assert_eq!(key_h, val_hash);
+
+		let last_src: &[u8] = &*address_hash;
+		let last_dst: &mut [u8] = &mut *key_h;
+		for (k, a) in last_dst[12..].iter_mut().zip(&last_src[12..]) {
+			*k ^= *a;
+		}
+	}
+
+	Ok(key_h)
+}
+
+/// Version for ArchiveDB.
 pub struct ArchiveV7;
 
 impl SimpleMigration for ArchiveV7 {
@@ -11,39 +65,121 @@ impl SimpleMigration for ArchiveV7 {
 		7
 	}
 
-	fn simple_migrate(&self, mut key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
+	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
 		if key.len() != 32 {
 			// metadata key, ignore.
 			return Some((key, value));
 		}
 
-		let val_hash = value.sha3();
 		let key_h = H256::from_slice(&key[..]);
-		if key_h != val_hash {
-			// this is a key which has been xor'd with an address.
-			// recover the address.
-			let address = key_h ^ val_hash;
+		let migrated = attempt_migrate(key_h, &value[..])
+			.expect("no 32-bit metadata keys in this version of archive; qed");
 
-			// check that the address is actually a 20-byte value.
-			// the leftmost 12 bytes should be zero.
-			if &address[0..12] != &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] {
-				// metadata key that was 32 bytes, ignore.
-				return Some((key, value));
-			}
-
-			let address_hash = Address::from(address).sha3();
-
-			// create the xor'd key in place.
-			key.copy_from_slice(&*val_hash);
-			assert_eq!(key, &*val_hash);
-
-			let last_src: &[u8] = &*address_hash;
-			for (k, a) in key[12..].iter_mut().zip(&last_src[12..]) {
-				*k ^= *a;
-			}
-		}
-		// nothing to do here
-		Some((key, value))
+		Some((migrated[..].to_owned(), value))
 	}
 }
+
+/// Version for OverlayRecent database.
+/// more involved than the archive version because of journaling.
+#[derive(Default)]
+pub struct OverlayRecentV7 {
+	migrated_keys: HashMap<H256, H256>,
+}
+
+impl OverlayRecentV7 {
+	// walk all journal entries in the database backwards,
+	// replacing any known migrated keys with their counterparts
+	// and then committing again.
+	fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> {
+		// re-written here because it may change in the journaldb module.
+		const V7_LATEST_ERA_KEY: &'static [u8] = &[ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
+		const PADDING: [u8; 10] = [0u8; 10];
+
+		if let Some(val) = source.get(V7_LATEST_ERA_KEY).expect("Low-level database error.") {
+			try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest));
+
+			let mut era = decode::<u64>(&val);
+			loop {
+				let mut index: usize = 0;
+				loop {
+					let entry_key = {
+						let mut r = RlpStream::new_list(3);
+						r.append(&era).append(&index).append(&&PADDING[..]);
+						r.out()
+					};
+
+					if let Some(journal_raw) = source.get(&entry_key).expect("Low-level database error.") {
+						let rlp = Rlp::new(&journal_raw);
+						let id: H256 = rlp.val_at(0);
+						let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new();
+
+						// migrate all inserted keys.
+						for r in rlp.at(1).iter() {
+							let old_key: H256 = r.val_at(0);
+							let v: Bytes = r.val_at(1);
+
+							let key = match self.migrated_keys.get(&old_key) {
+								Some(new) => new.clone(),
+								None => old_key.clone(),
+							};
+
+							inserted_keys.push((key, v));
+						}
+
+						// migrate all deleted keys.
+						let mut deleted_keys: Vec<H256> = rlp.val_at(2);
+						for old_key in &mut deleted_keys {
+							if let Some(new) = self.migrated_keys.get(&*old_key) {
+								*old_key = new.clone();
+							}
+						}
+
+						// rebuild the journal entry rlp.
+						let mut stream = RlpStream::new_list(3);
+						stream.append(&id);
+						stream.begin_list(inserted_keys.len());
+						for (k, v) in inserted_keys {
+							stream.begin_list(2).append(&k).append(&v);
+						}
+
+						stream.append(&deleted_keys);
+
+						// and insert it into the new database.
+						try!(batch.insert(entry_key, stream.out(), dest));
+						index += 1;
+					} else {
+						break;
+					}
+				}
+
+				if index == 0 || era == 0 {
+					break;
+				}
+				era -= 1;
+			}
+		}
+		Ok(())
+	}
+}
+
+impl Migration for OverlayRecentV7 {
+	fn version(&self) -> u32 { 7 }
+
+	// walk all records in the database, attempting to migrate any possible and
+	// keeping records of those that we do. then migrate the journal using
+	// this information.
+	fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> {
+		let mut batch = Batch::new(config);
+
+		for (key, value) in source.iter().filter(|&(ref k, _)| k.len() == 32) {
+			let key_h = H256::from_slice(&key[..]);
+			if let Ok(new_key) = attempt_migrate(key_h.clone(), &value) {
+				self.migrated_keys.insert(key_h, new_key);
+				try!(batch.insert(new_key[..].to_owned(), value.into_vec(), dest));
+			}
+		}
+
+		self.migrate_journal(source, batch, dest)
+	}
+}
\ No newline at end of file
diff --git a/parity/migration.rs b/parity/migration.rs
index 65fa080c7..853cb6e35 100644
--- a/parity/migration.rs
+++ b/parity/migration.rs
@@ -156,15 +156,18 @@ fn extras_database_migrations() -> Result<MigrationManager, Error> {
 /// Migrations on the state database.
 fn state_database_migrations(pruning: Algorithm) -> Result<MigrationManager, Error> {
 	let mut manager = MigrationManager::new(default_migration_settings());
-	match pruning {
-		Algorithm::Archive => try!(manager.add_migration(migrations::state::ArchiveV7).map_err(|_| Error::MigrationImpossible)),
+	let res = match pruning {
+		Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7),
+		Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
 		_ => die!("Unsupported pruning method for migration. Delete DB and resync"),
-	}
+	};
+
+	try!(res.map_err(|_| Error::MigrationImpossible));
 	Ok(manager)
 }
 
 /// Migrates database at given position with given migration rules.
-fn migrate_database(version: u32, db_path: PathBuf, migrations: MigrationManager) -> Result<(), Error> {
+fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> {
 	// check if migration is needed
 	if !migrations.is_needed(version) {
 		return Ok(())
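
Note (not part of the patch): the key transformation that attempt_migrate reverses and re-applies can be summarised as follows. Before v7, an account trie-node key was the node hash with its low 20 bytes XOR'd with the account address; from v7 onward the low 20 bytes are XOR'd with the low 20 bytes of the address' hash instead, which is why recovering the address as key ^ node_hash and checking that its top 12 bytes are zero identifies migratable keys. The sketch below is derived from the logic of attempt_migrate above; old_key and new_key are hypothetical helper names used only for exposition, not Parity APIs.

	// Illustrative sketch over plain byte arrays, assuming the caller already
	// has the node hash, the account address, and sha3(address).
	fn old_key(node_hash: [u8; 32], address: [u8; 20]) -> [u8; 32] {
		// pre-v7 scheme: XOR the low 20 bytes of the node hash with the address.
		let mut key = node_hash;
		for (k, a) in key[12..].iter_mut().zip(address.iter()) {
			*k ^= *a;
		}
		key
	}

	fn new_key(node_hash: [u8; 32], address_hash: [u8; 32]) -> [u8; 32] {
		// v7 scheme: XOR the low 20 bytes with the low 20 bytes of sha3(address).
		let mut key = node_hash;
		for (k, a) in key[12..].iter_mut().zip(address_hash[12..].iter()) {
			*k ^= *a;
		}
		key
	}

ArchiveV7 applies this rewrite to each key/value pair in place, while OverlayRecentV7 additionally records every renamed key and rewrites the journal entries so that the journaled insertions and deletions still refer to the renamed keys.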