Merge branch 'master' into client-ipc-refact

commit b25a37eb38
@@ -9,8 +9,7 @@
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0x118c30",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": "0x118c30"
 }
 }
 },
File diff suppressed because it is too large
@@ -9,8 +9,7 @@
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0x118c30",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": "0x118c30"
 }
 }
 },

@@ -9,8 +9,7 @@
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0x118c30",
-"daoRescueSoftFork": true
+"frontierCompatibilityModeLimit": "0x118c30"
 }
 }
 },

@@ -9,8 +9,7 @@
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0xffffffffffffffff",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": "0xffffffffffffffff"
 }
 }
 },

@@ -9,8 +9,7 @@
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": 0,
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": 0
 }
 }
 },

@@ -9,8 +9,7 @@
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": 0,
-"daoRescueSoftFork": true
+"frontierCompatibilityModeLimit": 0
 }
 }
 },

@@ -9,8 +9,7 @@
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar": "",
-"frontierCompatibilityModeLimit": "0x789b0",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": "0x789b0"
 }
 }
 },

@@ -9,8 +9,7 @@
 "durationLimit": "0x08",
 "blockReward": "0x14D1120D7B160000",
 "registrar": "5e70c0bbcd5636e0f9f9316e9f8633feb64d4050",
-"frontierCompatibilityModeLimit": "0xffffffffffffffff",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": "0xffffffffffffffff"
 }
 }
 },
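Note: each of the chain-spec hunks above makes the same mechanical edit — the `daoRescueSoftFork` key is dropped, which also removes the trailing comma from `frontierCompatibilityModeLimit`, now the last key in the params object. A minimal sketch (not part of the diff; `serde_json` is an assumed dev-dependency) that checks the trimmed shape, with values taken from the first hunk:

```rust
extern crate serde_json;

fn main() {
    let params = r#"{
        "durationLimit": "0x0d",
        "blockReward": "0x4563918244F40000",
        "registrar": "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
        "frontierCompatibilityModeLimit": "0x118c30"
    }"#;
    let v: serde_json::Value = serde_json::from_str(params).unwrap();
    // the soft-fork flag is gone; the remaining keys still parse
    assert!(v.get("daoRescueSoftFork").is_none());
    assert_eq!(v["frontierCompatibilityModeLimit"], "0x118c30");
}
```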
@@ -203,7 +203,6 @@ mod tests {
 timestamp: 0,
 difficulty: 0.into(),
 last_hashes: vec![],
-dao_rescue_block_gas_limit: None,
 gas_used: 0.into(),
 gas_limit: 0.into(),
 });

@@ -254,7 +253,7 @@ mod tests {
 spec.ensure_db_good(db.as_hashdb_mut());
 let last_hashes = vec![genesis_header.hash()];
 let vm_factory = Default::default();
-let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
+let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
 let b = b.close_and_lock();
 let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
 assert!(b.try_seal(engine.deref(), seal).is_ok());
@@ -183,7 +183,6 @@ pub struct OpenBlock<'x> {
 engine: &'x Engine,
 vm_factory: &'x EvmFactory,
 last_hashes: LastHashes,
-dao_rescue_block_gas_limit: Option<U256>,
 }
 
 /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,

@@ -195,7 +194,6 @@ pub struct ClosedBlock {
 block: ExecutedBlock,
 uncle_bytes: Bytes,
 last_hashes: LastHashes,
-dao_rescue_block_gas_limit: Option<U256>,
 unclosed_state: State,
 }
 
@@ -227,7 +225,6 @@ impl<'x> OpenBlock<'x> {
 db: Box<JournalDB>,
 parent: &Header,
 last_hashes: LastHashes,
-dao_rescue_block_gas_limit: Option<U256>,
 author: Address,
 gas_range_target: (U256, U256),
 extra_data: Bytes,

@@ -238,7 +235,6 @@ impl<'x> OpenBlock<'x> {
 engine: engine,
 vm_factory: vm_factory,
 last_hashes: last_hashes,
-dao_rescue_block_gas_limit: dao_rescue_block_gas_limit,
 };
 
 r.block.base.header.parent_hash = parent.hash();
@@ -295,7 +291,6 @@ impl<'x> OpenBlock<'x> {
 /// Get the environment info concerning this block.
 pub fn env_info(&self) -> EnvInfo {
 // TODO: memoise.
-const SOFT_FORK_BLOCK: u64 = 1_800_000;
 EnvInfo {
 number: self.block.base.header.number,
 author: self.block.base.header.author.clone(),

@@ -304,7 +299,6 @@ impl<'x> OpenBlock<'x> {
 last_hashes: self.last_hashes.clone(), // TODO: should be a reference.
 gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used),
 gas_limit: self.block.base.header.gas_limit.clone(),
-dao_rescue_block_gas_limit: if self.block.base.header.number == SOFT_FORK_BLOCK { Some(self.block.base.header.gas_limit) } else { self.dao_rescue_block_gas_limit },
 }
 }
 
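Note: with the `SOFT_FORK_BLOCK` special case gone, `env_info` becomes a pure projection of the open block's header and receipts. A self-contained toy model of the resulting shape (field names mirror the diff; the types and helper are illustrative, not the repository code):

```rust
// Toy model of the post-merge EnvInfo: a block-scoped execution context
// with no soft-fork gas-limit field.
#[derive(Default)]
struct EnvInfo {
    number: u64,
    gas_used: u64,
    gas_limit: u64,
    last_hashes: Vec<[u8; 32]>,
}

fn env_info_for(number: u64, gas_limit: u64, receipts_gas: &[u64]) -> EnvInfo {
    EnvInfo {
        number: number,
        // mirrors `self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)`
        gas_used: receipts_gas.last().cloned().unwrap_or(0),
        gas_limit: gas_limit,
        last_hashes: vec![],
    }
}

fn main() {
    let info = env_info_for(1_800_000, 4_000_000, &[21_000, 42_000]);
    assert_eq!(info.gas_used, 42_000);
    assert_eq!(info.number, 1_800_000);
}
```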
@@ -351,7 +345,6 @@ impl<'x> OpenBlock<'x> {
 block: s.block,
 uncle_bytes: uncle_bytes,
 last_hashes: s.last_hashes,
-dao_rescue_block_gas_limit: s.dao_rescue_block_gas_limit,
 unclosed_state: unclosed_state,
 }
 }

@@ -411,7 +404,6 @@ impl ClosedBlock {
 engine: engine,
 vm_factory: vm_factory,
 last_hashes: self.last_hashes,
-dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit,
 }
 }
 }
@@ -481,7 +473,6 @@ pub fn enact(
 db: Box<JournalDB>,
 parent: &Header,
 last_hashes: LastHashes,
-dao_rescue_block_gas_limit: Option<U256>,
 vm_factory: &EvmFactory,
 trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {

@@ -492,7 +483,7 @@ pub fn enact(
 }
 }
 
-let mut b = try!(OpenBlock::new(engine, vm_factory, trie_factory, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, header.author().clone(), (3141562.into(), 31415620.into()), header.extra_data().clone()));
+let mut b = try!(OpenBlock::new(engine, vm_factory, trie_factory, tracing, db, parent, last_hashes, header.author().clone(), (3141562.into(), 31415620.into()), header.extra_data().clone()));
 b.set_difficulty(*header.difficulty());
 b.set_gas_limit(*header.gas_limit());
 b.set_timestamp(header.timestamp());
@@ -510,13 +501,12 @@ pub fn enact_bytes(
 db: Box<JournalDB>,
 parent: &Header,
 last_hashes: LastHashes,
-dao_rescue_block_gas_limit: Option<U256>,
 vm_factory: &EvmFactory,
 trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
 let block = BlockView::new(block_bytes);
 let header = block.header();
-enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)
+enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, vm_factory, trie_factory)
 }
 
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header

@@ -528,12 +518,11 @@ pub fn enact_verified(
 db: Box<JournalDB>,
 parent: &Header,
 last_hashes: LastHashes,
-dao_rescue_block_gas_limit: Option<U256>,
 vm_factory: &EvmFactory,
 trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
 let view = BlockView::new(&block.bytes);
-enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)
+enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, vm_factory, trie_factory)
 }
 
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards

@@ -545,12 +534,11 @@ pub fn enact_and_seal(
 db: Box<JournalDB>,
 parent: &Header,
 last_hashes: LastHashes,
-dao_rescue_block_gas_limit: Option<U256>,
 vm_factory: &EvmFactory,
 trie_factory: TrieFactory,
 ) -> Result<SealedBlock, Error> {
 let header = BlockView::new(block_bytes).header_view();
-Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)).seal(engine, header.seal())))
+Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, vm_factory, trie_factory)).seal(engine, header.seal())))
 }
 
 #[cfg(test)]
@@ -570,7 +558,7 @@ mod tests {
 spec.ensure_db_good(db.as_hashdb_mut());
 let last_hashes = vec![genesis_header.hash()];
 let vm_factory = Default::default();
-let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 let b = b.close_and_lock();
 let _ = b.seal(engine.deref(), vec![]);
 }

@@ -586,7 +574,7 @@ mod tests {
 let mut db = db_result.take();
 spec.ensure_db_good(db.as_hashdb_mut());
 let vm_factory = Default::default();
-let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
+let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
 .close_and_lock().seal(engine.deref(), vec![]).unwrap();
 let orig_bytes = b.rlp_bytes();
 let orig_db = b.drain();

@@ -594,7 +582,7 @@ mod tests {
 let mut db_result = get_temp_journal_db();
 let mut db = db_result.take();
 spec.ensure_db_good(db.as_hashdb_mut());
-let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default(), Default::default()).unwrap();
+let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default(), Default::default()).unwrap();
 
 assert_eq!(e.rlp_bytes(), orig_bytes);
 

@@ -614,7 +602,7 @@ mod tests {
 let mut db = db_result.take();
 spec.ensure_db_good(db.as_hashdb_mut());
 let vm_factory = Default::default();
-let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 let mut uncle1_header = Header::new();
 uncle1_header.extra_data = b"uncle1".to_vec();
 let mut uncle2_header = Header::new();

@@ -629,7 +617,7 @@ mod tests {
 let mut db_result = get_temp_journal_db();
 let mut db = db_result.take();
 spec.ensure_db_good(db.as_hashdb_mut());
-let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default(), Default::default()).unwrap();
+let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default(), Default::default()).unwrap();
 
 let bytes = e.rlp_bytes();
 assert_eq!(bytes, orig_bytes);
@@ -82,7 +82,6 @@ pub use types::blockchain_info::BlockChainInfo;
 pub use types::block_status::BlockStatus;
 pub use blockchain::CacheSize as BlockChainCacheSize;
 
-
 const MAX_TX_QUEUE_SIZE: usize = 4096;
 const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2;
 
@@ -287,7 +286,7 @@ impl Client {
 let last_hashes = self.build_last_hashes(header.parent_hash.clone());
 let db = self.state_db.lock().unwrap().boxed_clone();
 
-let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(header.parent_hash.clone()), &self.vm_factory, self.trie_factory.clone());
+let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone());
 if let Err(e) = enact_result {
 warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
 return Err(());

@@ -483,7 +482,7 @@ impl Client {
 HeaderView::new(&self.best_block_header()).state_root(),
 self.engine.account_start_nonce(),
 self.trie_factory.clone())
 .expect("State root of best block header always valid.")
 }
 
 /// Get info on the cache.
@@ -594,8 +593,6 @@ impl Client {
 }
 }
 
-#[derive(Ipc)]
-#[ipc(client_ident="RemoteClient")]
 impl BlockChainClient for Client {
 fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, ExecutionError> {
 let header = self.block_header(BlockID::Latest).unwrap();

@@ -609,7 +606,6 @@ impl BlockChainClient for Client {
 last_hashes: last_hashes,
 gas_used: U256::zero(),
 gas_limit: U256::max_value(),
-dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit(view.parent_hash()),
 };
 // that's just a copy of the state.
 let mut state = self.state();
@@ -708,7 +704,7 @@ impl BlockChainClient for Client {
 fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
 self.transaction_address(id).and_then(|address| {
 let t = self.chain.block(&address.block_hash)
 .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index));
 
 match (t, self.chain.transaction_receipt(&address)) {
 (Some(tx), Some(receipt)) => {

@@ -811,42 +807,42 @@ impl BlockChainClient for Client {
 // TODO: lock blockchain only once
 
 let mut blocks = filter.bloom_possibilities().iter()
 .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
 .flat_map(|m| m)
 // remove duplicate elements
 .collect::<HashSet<u64>>()
 .into_iter()
 .collect::<Vec<u64>>();
 
 blocks.sort();
 
 blocks.into_iter()
 .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
 .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
 .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
 .flat_map(|(number, hash, receipts, hashes)| {
 let mut log_index = 0;
 receipts.into_iter()
 .enumerate()
 .flat_map(|(index, receipt)| {
 log_index += receipt.logs.len();
 receipt.logs.into_iter()
 .enumerate()
 .filter(|tuple| filter.matches(&tuple.1))
 .map(|(i, log)| LocalizedLogEntry {
 entry: log,
 block_hash: hash.clone(),
 block_number: number,
 transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new),
 transaction_index: index,
 log_index: log_index + i
 })
 .collect::<Vec<LocalizedLogEntry>>()
-})
-.collect::<Vec<LocalizedLogEntry>>()
 
 })
-.collect()
+.collect::<Vec<LocalizedLogEntry>>()
 
+})
+.collect()
 }
 
 fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {
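Note: the reshuffled tail of `logs` above moves a `.collect::<Vec<LocalizedLogEntry>>()` inside the outer `flat_map`, so each block's matching logs are materialized before the outer iterator flattens them, and only the final `.collect()` builds the result. A self-contained sketch of the same shape with plain numbers (illustrative only, not the repository code):

```rust
fn main() {
    // each inner Vec stands in for one block's logs
    let blocks: Vec<Vec<u32>> = vec![vec![1, 2], vec![3], vec![4, 5]];
    let flat: Vec<u32> = blocks.into_iter()
        .flat_map(|logs| {
            logs.into_iter()
                .filter(|&n| n % 2 == 1)   // stands in for `filter.matches`
                .map(|n| n * 10)           // stands in for the LocalizedLogEntry mapping
                .collect::<Vec<u32>>()     // materialize per block, as in the diff
        })
        .collect();                        // single outer collect
    assert_eq!(flat, vec![10, 30, 50]);
}
```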
@@ -870,23 +866,23 @@ impl BlockChainClient for Client {
 fn trace(&self, trace: TraceId) -> Option<LocalizedTrace> {
 let trace_address = trace.address;
 self.transaction_address(trace.transaction)
 .and_then(|tx_address| {
 self.block_number(BlockID::Hash(tx_address.block_hash))
 .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address))
 })
 }
 
 fn transaction_traces(&self, transaction: TransactionID) -> Option<Vec<LocalizedTrace>> {
 self.transaction_address(transaction)
 .and_then(|tx_address| {
 self.block_number(BlockID::Hash(tx_address.block_hash))
 .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index))
 })
 }
 
 fn block_traces(&self, block: BlockID) -> Option<Vec<LocalizedTrace>> {
 self.block_number(block)
 .and_then(|number| self.tracedb.block_traces(number))
 }
 
 fn last_hashes(&self) -> LastHashes {

@@ -900,9 +896,9 @@ impl BlockChainClient for Client {
 };
 
 self.miner.import_transactions(self, transactions, &fetch_account)
 .into_iter()
 .map(|res| res.map_err(|e| e.into()))
 .collect()
 }
 
 fn queue_transactions(&self, transactions: Vec<Bytes>) {

@@ -939,7 +935,6 @@ impl MiningBlockChainClient for Client {
 self.state_db.lock().unwrap().boxed_clone(),
 &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"),
 self.build_last_hashes(h.clone()),
-self.dao_rescue_block_gas_limit(h.clone()),
 author,
 gas_range_target,
 extra_data,

@@ -947,13 +942,13 @@ impl MiningBlockChainClient for Client {
 
 // Add uncles
 self.chain
 .find_uncle_headers(&h, engine.maximum_uncle_age())
 .unwrap()
 .into_iter()
 .take(engine.maximum_uncle_count())
 .foreach(|h| {
 open_block.push_uncle(h).unwrap();
 });
 
 open_block
 }
@@ -42,7 +42,7 @@ use header::{BlockNumber};
 use transaction::{LocalizedTransaction, SignedTransaction};
 use log_entry::LocalizedLogEntry;
 use filter::Filter;
-use views::{HeaderView, BlockView};
+use views::{BlockView};
 use error::{ImportResult, ExecutionError};
 use receipt::LocalizedReceipt;
 use trace::LocalizedTrace;

@@ -216,28 +216,6 @@ pub trait BlockChainClient : Sync + Send {
 Err(())
 }
 }
-
-/// Get `Some` gas limit of SOFT_FORK_BLOCK, or `None` if chain is not yet that long.
-fn dao_rescue_block_gas_limit(&self, chain_hash: H256) -> Option<U256> {
-const SOFT_FORK_BLOCK: u64 = 1800000;
-// shortcut if the canon chain is already known.
-if self.chain_info().best_block_number > SOFT_FORK_BLOCK + 1000 {
-return self.block_header(BlockID::Number(SOFT_FORK_BLOCK)).map(|header| HeaderView::new(&header).gas_limit());
-}
-// otherwise check according to `chain_hash`.
-if let Some(mut header) = self.block_header(BlockID::Hash(chain_hash)) {
-if HeaderView::new(&header).number() < SOFT_FORK_BLOCK {
-None
-} else {
-while HeaderView::new(&header).number() != SOFT_FORK_BLOCK {
-header = self.block_header(BlockID::Hash(HeaderView::new(&header).parent_hash())).expect("chain is complete; parent of chain entry must be in chain; qed");
-}
-Some(HeaderView::new(&header).gas_limit())
-}
-} else {
-None
-}
-}
 }
 
 /// Extended client interface used for mining
@@ -39,9 +39,6 @@ pub struct EnvInfo {
 pub last_hashes: LastHashes,
 /// The gas used.
 pub gas_used: U256,
-
-/// Block gas limit at DAO rescue block SOFT_FORK_BLOCK or None if not yet there.
-pub dao_rescue_block_gas_limit: Option<U256>,
 }
 
 impl Default for EnvInfo {

@@ -54,7 +51,6 @@ impl Default for EnvInfo {
 gas_limit: 0.into(),
 last_hashes: vec![],
 gas_used: 0.into(),
-dao_rescue_block_gas_limit: None,
 }
 }
 }

@@ -70,7 +66,6 @@ impl From<ethjson::vm::Env> for EnvInfo {
 timestamp: e.timestamp.into(),
 last_hashes: (1..cmp::min(number + 1, 257)).map(|i| format!("{}", number - i).as_bytes().sha3()).collect(),
 gas_used: U256::zero(),
-dao_rescue_block_gas_limit: None,
 }
 }
 }
@@ -39,8 +39,6 @@ pub struct EthashParams {
 pub registrar: Address,
 /// Homestead transition block number.
 pub frontier_compatibility_mode_limit: u64,
-/// Enable the soft-fork logic.
-pub dao_rescue_soft_fork: bool,
 }
 
 impl From<ethjson::spec::EthashParams> for EthashParams {

@@ -53,7 +51,6 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
 block_reward: p.block_reward.into(),
 registrar: p.registrar.into(),
 frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.into(),
-dao_rescue_soft_fork: p.dao_rescue_soft_fork.into(),
 }
 }
 }

@@ -102,11 +99,7 @@ impl Engine for Ethash {
 if env_info.number < self.ethash_params.frontier_compatibility_mode_limit {
 Schedule::new_frontier()
 } else {
-let mut s = Schedule::new_homestead();
-if self.ethash_params.dao_rescue_soft_fork {
-s.reject_dao_transactions = env_info.dao_rescue_block_gas_limit.map_or(false, |x| x <= 4_000_000.into());
-}
-s
+Schedule::new_homestead()
 }
 }
 
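Note: after the hunk above, the Ethash schedule choice is a pure function of the block number against the Homestead transition; the `reject_dao_transactions` toggle is gone. A self-contained sketch of the selection (a plain enum stands in for the real `Schedule` type):

```rust
#[derive(Debug, PartialEq)]
enum Schedule { Frontier, Homestead }

// mirrors the post-merge branch in Ethash's `schedule`
fn schedule(block_number: u64, frontier_compatibility_mode_limit: u64) -> Schedule {
    if block_number < frontier_compatibility_mode_limit {
        Schedule::Frontier
    } else {
        Schedule::Homestead
    }
}

fn main() {
    // 0x118c30 (1,150,000) is the mainnet transition used in the specs above
    let limit: u64 = 0x118c30;
    assert_eq!(schedule(0, limit), Schedule::Frontier);
    assert_eq!(schedule(limit, limit), Schedule::Homestead);
}
```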
@@ -325,7 +318,7 @@ mod tests {
 spec.ensure_db_good(db.as_hashdb_mut());
 let last_hashes = vec![genesis_header.hash()];
 let vm_factory = Default::default();
-let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 let b = b.close();
 assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
 }

@@ -340,7 +333,7 @@ mod tests {
 spec.ensure_db_good(db.as_hashdb_mut());
 let last_hashes = vec![genesis_header.hash()];
 let vm_factory = Default::default();
-let mut b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+let mut b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 let mut uncle = Header::new();
 let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
 uncle.author = uncle_author.clone();

@@ -369,7 +362,6 @@ mod tests {
 last_hashes: vec![],
 gas_used: 0.into(),
 gas_limit: 0.into(),
-dao_rescue_block_gas_limit: None,
 });
 
 assert!(schedule.stack_limit > 0);

@@ -382,7 +374,6 @@ mod tests {
 last_hashes: vec![],
 gas_used: 0.into(),
 gas_limit: 0.into(),
-dao_rescue_block_gas_limit: None,
 });
 
 assert!(!schedule.have_delegate_call);
@@ -33,11 +33,8 @@ use super::spec::*;
 pub fn new_olympic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/olympic.json")) }
 
 /// Create a new Frontier mainnet chain spec.
-pub fn new_frontier(dao_rescue: bool) -> Spec {
-Spec::load(match dao_rescue {
-true => include_bytes!("../../res/ethereum/frontier_dao_rescue.json"),
-false => include_bytes!("../../res/ethereum/frontier.json"),
-})
+pub fn new_frontier() -> Spec {
+Spec::load(include_bytes!("../../res/ethereum/frontier.json"))
 }
 
 /// Create a new Frontier chain spec as though it never changes to Homestead.

@@ -89,7 +86,7 @@ mod tests {
 
 #[test]
 fn frontier() {
-let frontier = new_frontier(true);
+let frontier = new_frontier();
 
 assert_eq!(frontier.state_root(), H256::from_str("d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544").unwrap());
 let genesis = frontier.genesis_block();
@@ -319,7 +319,6 @@ mod tests {
 last_hashes: vec![],
 gas_used: 0.into(),
 gas_limit: 0.into(),
-dao_rescue_block_gas_limit: None,
 }
 }
 
@@ -1,4 +1,4 @@
-use util::migration::Migration;
+use util::migration::SimpleMigration;
 
 /// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front.
 pub struct ToV6;

@@ -17,7 +17,7 @@ impl ToV6 {
 }
 }
 
-impl Migration for ToV6 {
+impl SimpleMigration for ToV6 {
 fn version(&self) -> u32 {
 6
 }
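Note: `ToV6` now implements `SimpleMigration` instead of `Migration`. Judging from the manager code removed at the end of this diff (which calls `migration.simple_migrate(key, value)`), a simple migration maps one key/value pair at a time. A self-contained sketch of that shape — the trait and impl names come from the diff, but the method signature and the transform are inferred, not quoted:

```rust
// Inferred trait shape; the real definition lives in util::migration.
trait SimpleMigration {
    fn version(&self) -> u32;
    // map one key/value pair; returning None drops the entry
    fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
}

struct ToV6;

impl SimpleMigration for ToV6 {
    fn version(&self) -> u32 { 6 }

    fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
        // illustrative transform only: move the trailing index byte to the front
        let mut key = key;
        if let Some(last) = key.pop() {
            key.insert(0, last);
        }
        Some((key, value))
    }
}

fn main() {
    let mut m = ToV6;
    assert_eq!(m.version(), 6);
    assert_eq!(m.simple_migrate(vec![1, 2, 9], vec![0]), Some((vec![9, 1, 2], vec![0])));
}
```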
@@ -356,7 +356,6 @@ impl MinerService for Miner {
 last_hashes: last_hashes,
 gas_used: U256::zero(),
 gas_limit: U256::max_value(),
-dao_rescue_block_gas_limit: chain.dao_rescue_block_gas_limit(header.parent_hash().clone()),
 };
 // that's just a copy of the state.
 let mut state = block.state().clone();
@@ -33,7 +33,7 @@
 //! use ethcore::miner::{Miner, MinerService};
 //!
 //! fn main() {
-//! let miner: Miner = Miner::with_spec(ethereum::new_frontier(true));
+//! let miner: Miner = Miner::with_spec(ethereum::new_frontier());
 //! // get status
 //! assert_eq!(miner.status().transactions_in_pending_queue, 0);
 //!
@@ -180,7 +180,6 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
 db,
 &last_header,
 last_hashes.clone(),
-None,
 author.clone(),
 (3141562.into(), 31415620.into()),
 vec![]
@@ -727,7 +727,6 @@ pub fn get_ipc_meta_items(attr: &ast::Attribute) -> Option<&[P<ast::MetaItem>]>
 fn client_ident_renamed(cx: &ExtCtxt, item: &ast::Item) -> Option<String> {
 for meta_items in item.attrs().iter().filter_map(get_ipc_meta_items) {
 for meta_item in meta_items {
-let span = meta_item.span;
 match meta_item.node {
 ast::MetaItemKind::NameValue(ref name, ref lit) if name == &"client_ident" => {
 if let Ok(s) = get_str_from_lit(cx, name, lit) {
@@ -53,8 +53,7 @@ mod tests {
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit" : "0x",
-"daoRescueSoftFork": true
+"frontierCompatibilityModeLimit" : "0x"
 }
 }
 }"#;
@@ -42,9 +42,6 @@ pub struct EthashParams {
 /// Homestead transition block number.
 #[serde(rename="frontierCompatibilityModeLimit")]
 pub frontier_compatibility_mode_limit: Uint,
-/// DAO rescue soft-fork?
-#[serde(rename="daoRescueSoftFork")]
-pub dao_rescue_soft_fork: bool,
 }
 
 /// Ethash engine deserialization.

@@ -69,8 +66,7 @@ mod tests {
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar": "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0x42",
-"daoRescueSoftFork": true
+"frontierCompatibilityModeLimit": "0x42"
 }
 }"#;
 
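Note: the struct above drives deserialization of the spec JSON, and `#[serde(rename=…)]` is what maps the camelCase keys onto the snake_case fields — so the field and the JSON key are dropped together. A minimal standalone sketch of the same pattern (serde_derive/serde_json are assumed dependencies here, and a plain `u64` with a numeric literal stands in for the hex-string-parsing `Uint`):

```rust
#[macro_use]
extern crate serde_derive;
extern crate serde_json;

#[derive(Deserialize)]
struct EthashParams {
    #[serde(rename = "frontierCompatibilityModeLimit")]
    frontier_compatibility_mode_limit: u64, // the real type is Uint, which accepts "0x42"
}

fn main() {
    let p: EthashParams =
        serde_json::from_str(r#"{ "frontierCompatibilityModeLimit": 66 }"#).unwrap();
    assert_eq!(p.frontier_compatibility_mode_limit, 0x42);
}
```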
@@ -63,8 +63,7 @@ mod tests {
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit" : "0x",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit" : "0x"
 }
 }
 },
@@ -57,9 +57,8 @@ Operating Options:
 --fork POLICY Specifies the client's fork policy. POLICY must be
 one of:
 dogmatic - sticks rigidly to the standard chain.
-dao-soft - votes for the DAO-rescue soft-fork.
-normal - goes with whatever fork is decided but
-votes for none. [default: normal].
+none - goes with whatever fork is decided but
+votes for none. [default: none].
 
 Account Options:
 --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution.
@@ -49,8 +49,7 @@ pub struct Directories {
 
 #[derive(Eq, PartialEq, Debug)]
 pub enum Policy {
-DaoSoft,
-Normal,
+None,
 Dogmatic,
 }
 
@@ -135,33 +134,24 @@ impl Configuration {
 
 pub fn policy(&self) -> Policy {
 match self.args.flag_fork.as_str() {
-"dao-soft" => Policy::DaoSoft,
-"normal" => Policy::Normal,
+"none" => Policy::None,
 "dogmatic" => Policy::Dogmatic,
 x => die!("{}: Invalid value given for --policy option. Use --help for more info.", x)
 }
 }
 
 pub fn gas_floor_target(&self) -> U256 {
-if self.policy() == Policy::DaoSoft {
-3_141_592.into()
-} else {
-let d = &self.args.flag_gas_floor_target;
-U256::from_dec_str(d).unwrap_or_else(|_| {
-die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d)
-})
-}
+let d = &self.args.flag_gas_floor_target;
+U256::from_dec_str(d).unwrap_or_else(|_| {
+die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d)
+})
 }
 
 pub fn gas_ceil_target(&self) -> U256 {
-if self.policy() == Policy::DaoSoft {
-3_141_592.into()
-} else {
-let d = &self.args.flag_gas_cap;
-U256::from_dec_str(d).unwrap_or_else(|_| {
-die!("{}: Invalid target gas ceiling given. Must be a decimal unsigned 256-bit number.", d)
-})
-}
+let d = &self.args.flag_gas_cap;
+U256::from_dec_str(d).unwrap_or_else(|_| {
+die!("{}: Invalid target gas ceiling given. Must be a decimal unsigned 256-bit number.", d)
+})
 }
 
 pub fn gas_price(&self) -> U256 {
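Note: with `DaoSoft` and `Normal` gone, `--fork` accepts only `none` and `dogmatic`, with `none` the default. A self-contained sketch of the parsing logic above, with `die!` swapped for a `Result` so it runs standalone (illustrative, not the repository code):

```rust
#[derive(Eq, PartialEq, Debug)]
enum Policy { None, Dogmatic }

// mirrors Configuration::policy above
fn policy(flag_fork: &str) -> Result<Policy, String> {
    match flag_fork {
        "none" => Ok(Policy::None),
        "dogmatic" => Ok(Policy::Dogmatic),
        x => Err(format!("{}: Invalid value given for --policy option.", x)),
    }
}

fn main() {
    assert_eq!(policy("none"), Ok(Policy::None));
    assert_eq!(policy("dogmatic"), Ok(Policy::Dogmatic));
    assert!(policy("dao-soft").is_err()); // the removed variant no longer parses
}
```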
@@ -207,7 +197,7 @@ impl Configuration {
 
 pub fn spec(&self) -> Spec {
 match self.chain().as_str() {
-"frontier" | "homestead" | "mainnet" => ethereum::new_frontier(self.policy() != Policy::Dogmatic),
+"frontier" | "homestead" | "mainnet" => ethereum::new_frontier(),
 "morden" | "testnet" => ethereum::new_morden(),
 "olympic" => ethereum::new_olympic(),
 f => Spec::load(contents(f).unwrap_or_else(|_| {
@@ -97,7 +97,7 @@ use rpc::RpcServer;
 use signer::{SignerServer, new_token};
 use dapps::WebappServer;
 use io_handler::ClientIoHandler;
-use configuration::Configuration;
+use configuration::{Policy, Configuration};
 
 fn main() {
 let conf = Configuration::parse();

@@ -199,6 +199,11 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 warn!("NOTE that Signer will not ask you to confirm transactions from unlocked account.");
 }
 
+// Check fork settings.
+if conf.policy() != Policy::None {
+warn!("Value given for --policy, yet no proposed forks exist. Ignoring.");
+}
+
 // Secret Store
 let account_service = Arc::new(conf.account_service());
 
@@ -19,8 +19,7 @@ use std::fs::File;
 use std::io::{Read, Write, Error as IoError, ErrorKind};
 use std::path::PathBuf;
 use std::fmt::{Display, Formatter, Error as FmtError};
-use util::migration::{Manager as MigrationManager, Config as MigrationConfig, MigrationIterator};
-use util::kvdb::{Database, DatabaseConfig, CompactionProfile};
+use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError};
 use ethcore::migrations;
 
 /// Database is assumed to be at default version, when no version file is found.
@@ -65,6 +64,15 @@ impl From<IoError> for Error {
 }
 }
 
+impl From<MigrationError> for Error {
+fn from(err: MigrationError) -> Self {
+match err {
+MigrationError::Io(e) => Error::Io(e),
+_ => Error::MigrationFailed,
+}
+}
+}
+
 /// Returns the version file path.
 fn version_file_path(path: &PathBuf) -> PathBuf {
 let mut file_path = path.clone();
@@ -109,14 +117,6 @@ fn extras_database_path(path: &PathBuf) -> PathBuf {
 extras_path
 }
 
-/// Temporary database path used for migration.
-fn temp_database_path(path: &PathBuf) -> PathBuf {
-let mut temp_path = path.clone();
-temp_path.pop();
-temp_path.push("temp_migration");
-temp_path
-}
-
 /// Database backup
 fn backup_database_path(path: &PathBuf) -> PathBuf {
 let mut backup_path = path.clone();
@@ -146,44 +146,27 @@ fn extras_database_migrations() -> Result<MigrationManager, Error> {
 }
 
 /// Migrates database at given position with given migration rules.
-fn migrate_database(version: u32, path: PathBuf, migrations: MigrationManager) -> Result<(), Error> {
+fn migrate_database(version: u32, db_path: PathBuf, migrations: MigrationManager) -> Result<(), Error> {
 // check if migration is needed
 if !migrations.is_needed(version) {
 return Ok(())
 }
 
-let temp_path = temp_database_path(&path);
-let backup_path = backup_database_path(&path);
-// remote the dir if it exists
-let _ = fs::remove_dir_all(&temp_path);
+let backup_path = backup_database_path(&db_path);
+// remove the backup dir if it exists
 let _ = fs::remove_dir_all(&backup_path);
 
-{
-let db_config = DatabaseConfig {
-prefix_size: None,
-max_open_files: 64,
-cache_size: None,
-compaction: CompactionProfile::default(),
-};
-
-// open old database
-let old = try!(Database::open(&db_config, path.to_str().unwrap()).map_err(|_| Error::MigrationFailed));
-
-// create new database
-let mut temp = try!(Database::open(&db_config, temp_path.to_str().unwrap()).map_err(|_| Error::MigrationFailed));
-
-// migrate old database to the new one
-try!(migrations.execute(MigrationIterator::from(old.iter()), version, &mut temp).map_err(|_| Error::MigrationFailed));
-}
+// migrate old database to the new one
+let temp_path = try!(migrations.execute(&db_path, version));
 
 // create backup
-try!(fs::rename(&path, &backup_path));
+try!(fs::rename(&db_path, &backup_path));
 
 // replace the old database with the new one
-if let Err(err) = fs::rename(&temp_path, &path) {
+if let Err(err) = fs::rename(&temp_path, &db_path) {
 // if something went wrong, bring back backup
-try!(fs::rename(&backup_path, path));
-return Err(From::from(err));
+try!(fs::rename(&backup_path, &db_path));
+return Err(err.into());
 }
 
 // remove backup
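Note: the migration flow is now: the manager's `execute` writes a migrated copy and returns its temporary path, the original is renamed to a backup, the temp copy is renamed into place, and the backup is removed (or restored on failure). A self-contained sketch of that swap dance using plain files in a temp directory — the paths and the `execute` stand-in are illustrative, and the real code operates on database directories:

```rust
use std::fs;
use std::io;
use std::path::PathBuf;

// stand-in for `migrations.execute(&db_path, version)`: write the migrated
// copy next to the original and hand back its path
fn execute(db_path: &PathBuf) -> io::Result<PathBuf> {
    let temp_path = db_path.with_extension("tmp");
    let data = fs::read_to_string(db_path)?;
    fs::write(&temp_path, data.to_uppercase())?; // the "migration"
    Ok(temp_path)
}

fn migrate_database(db_path: PathBuf) -> io::Result<()> {
    let backup_path = db_path.with_extension("bak");
    let _ = fs::remove_file(&backup_path);   // remove any stale backup
    let temp_path = execute(&db_path)?;      // migrate into a temp copy
    fs::rename(&db_path, &backup_path)?;     // create backup
    if let Err(err) = fs::rename(&temp_path, &db_path) {
        fs::rename(&backup_path, &db_path)?; // bring back backup on failure
        return Err(err);
    }
    let _ = fs::remove_file(&backup_path);   // remove backup
    Ok(())
}

fn main() -> io::Result<()> {
    let db = std::env::temp_dir().join("demo_db");
    fs::write(&db, "abc")?;
    migrate_database(db.clone())?;
    assert_eq!(fs::read_to_string(&db)?, "ABC");
    Ok(())
}
```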
@@ -200,8 +200,7 @@ const TRANSACTION_COUNT_SPEC: &'static [u8] = br#"{
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0xffffffffffffffff",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": "0xffffffffffffffff"
 }
 }
 },
@@ -44,10 +44,10 @@
 //! let mut service = NetworkService::new(NetworkConfiguration::new()).unwrap();
 //! service.start().unwrap();
 //! let dir = env::temp_dir();
-//! let miner = Miner::new(Default::default(), ethereum::new_frontier(true), None);
+//! let miner = Miner::new(Default::default(), ethereum::new_frontier(), None);
 //! let client = Client::new(
 //! ClientConfig::default(),
-//! ethereum::new_frontier(true),
+//! ethereum::new_frontier(),
 //! &dir,
 //! miner,
 //! service.io().channel()
@@ -1,55 +0,0 @@
-// Copyright 2015, 2016 Ethcore (UK) Ltd.
-// This file is part of Parity.
-
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-
-//! `kvdb::Database` as `migration::Destination`
-
-use std::collections::BTreeMap;
-use kvdb::{Database, DatabaseIterator, DBTransaction};
-use migration::{Destination, Error};
-
-/// Database iterator with `Item` complient with migration `Manager` interface.
-pub struct MigrationIterator {
-iter: DatabaseIterator,
-}
-
-impl From<DatabaseIterator> for MigrationIterator {
-fn from(iter: DatabaseIterator) -> Self {
-MigrationIterator {
-iter: iter
-}
-}
-}
-
-impl Iterator for MigrationIterator {
-type Item = (Vec<u8>, Vec<u8>);
-
-fn next(&mut self) -> Option<Self::Item> {
-self.iter.next().map(|(k, v)| (k.to_vec(), v.to_vec()))
-}
-}
-
-impl Destination for Database {
-fn commit(&mut self, batch: BTreeMap<Vec<u8>, Vec<u8>>) -> Result<(), Error> {
-let transaction = DBTransaction::new();
-
-for keypair in &batch {
-try!(transaction.put(&keypair.0, &keypair.1).map_err(Error::Custom))
-}
-
-self.write(transaction).map_err(Error::Custom)
-}
-}
-
@@ -1,117 +0,0 @@
-// Copyright 2015, 2016 Ethcore (UK) Ltd.
-// This file is part of Parity.
-
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-
-//! Migration manager
-
-use std::collections::BTreeMap;
-use migration::{Migration, Destination};
-
-/// Migration error.
-#[derive(Debug)]
-pub enum Error {
-	/// Error returned when it is impossible to add new migration rules.
-	CannotAddMigration,
-	/// Error returned when migration from specific version can not be performed.
-	MigrationImpossible,
-	/// Custom error.
-	Custom(String),
-}
-
-/// Migration config.
-pub struct Config {
-	/// Defines how many elements should be migrated at once.
-	pub batch_size: usize,
-}
-
-impl Default for Config {
-	fn default() -> Self {
-		Config {
-			batch_size: 1024,
-		}
-	}
-}
-
-/// Manages database migration.
-pub struct Manager {
-	config: Config,
-	migrations: Vec<Box<Migration>>,
-}
-
-impl Manager {
-	/// Creates new migration manager with given configuration.
-	pub fn new(config: Config) -> Self {
-		Manager {
-			config: config,
-			migrations: vec![]
-		}
-	}
-
-	/// Adds new migration rules.
-	pub fn add_migration<T>(&mut self, migration: T) -> Result<(), Error> where T: Migration {
-		let version_match = match self.migrations.last() {
-			Some(last) => last.version() + 1 == migration.version(),
-			None => true,
-		};
-
-		match version_match {
-			true => Ok(self.migrations.push(Box::new(migration))),
-			false => Err(Error::CannotAddMigration),
-		}
-	}
-
-	/// Performs migration to destination.
-	pub fn execute<D>(&self, db_iter: D, version: u32, destination: &mut Destination) -> Result<(), Error> where
-		D: Iterator<Item = (Vec<u8>, Vec<u8>)> {
-
-		let migrations = try!(self.migrations_from(version).ok_or(Error::MigrationImpossible));
-
-		let mut batch: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
-
-		for keypair in db_iter {
-			let migrated = migrations.iter().fold(Some(keypair), |migrated, migration| {
-				migrated.and_then(|(key, value)| migration.simple_migrate(key, value))
-			});
-
-			if let Some((key, value)) = migrated {
-				batch.insert(key, value);
-			}
-
-			if batch.len() == self.config.batch_size {
-				try!(destination.commit(batch));
-				batch = BTreeMap::new();
-			}
-		}
-
-		try!(destination.commit(batch));
-
-		Ok(())
-	}
-
-	/// Returns true if migration is needed.
-	pub fn is_needed(&self, version: u32) -> bool {
-		match self.migrations.last() {
-			Some(last) => version < last.version(),
-			None => false,
-		}
-	}
-
-	fn migrations_from(&self, version: u32) -> Option<&[Box<Migration>]> {
-		// index of the first required migration
-		let position = self.migrations.iter().position(|m| m.version() == version + 1);
-		position.map(|p| &self.migrations[p..])
-	}
-}
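The core of the removed `execute` is the fold that threads each key/value pair through every pending migration in version order, dropping the pair as soon as any step returns `None`. A self-contained sketch of that chaining, with pairs simplified to `(u8, u8)` for illustration:

// Each migration maps a pair to Some(new pair) or None (delete it),
// mirroring the shape of `simple_migrate`.
type Step = fn((u8, u8)) -> Option<(u8, u8)>;

fn bump_key(pair: (u8, u8)) -> Option<(u8, u8)> {
	Some((pair.0 + 1, pair.1))
}

fn drop_zero_value(pair: (u8, u8)) -> Option<(u8, u8)> {
	if pair.1 == 0 { None } else { Some(pair) }
}

fn main() {
	let steps: Vec<Step> = vec![bump_key, drop_zero_value];

	// Identical shape to the manager's fold over `migrations`.
	let migrate = |pair| steps.iter().fold(Some(pair), |acc, step| acc.and_then(|p| step(p)));

	assert_eq!(migrate((1, 7)), Some((2, 7)));
	assert_eq!(migrate((1, 0)), None); // deleted mid-chain, later steps never run
}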
@@ -15,19 +15,58 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 //! DB Migration module.
 
-mod db_impl;
-mod manager;
-
 #[cfg(test)]
 mod tests;
 
-pub use self::manager::{Error, Config, Manager};
-pub use self::db_impl::MigrationIterator;
-
 use std::collections::BTreeMap;
+use std::fs;
+use std::path::{Path, PathBuf};
 
-/// Single migration.
+use ::kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction};
+
+/// Migration config.
+pub struct Config {
+	/// Defines how many elements should be migrated at once.
+	pub batch_size: usize,
+}
+
+impl Default for Config {
+	fn default() -> Self {
+		Config {
+			batch_size: 1024,
+		}
+	}
+}
+
+/// Migration error.
+#[derive(Debug)]
+pub enum Error {
+	/// Error returned when it is impossible to add new migration rules.
+	CannotAddMigration,
+	/// Error returned when migration from specific version can not be performed.
+	MigrationImpossible,
+	/// Io Error.
+	Io(::std::io::Error),
+	/// Custom error.
+	Custom(String),
+}
+
+impl From<::std::io::Error> for Error {
+	fn from(e: ::std::io::Error) -> Self {
+		Error::Io(e)
+	}
+}
+
+/// A generalized migration from the given db to a destination db.
 pub trait Migration: 'static {
+	/// Version of the database after the migration.
+	fn version(&self) -> u32;
+	/// Migrate a source to a destination.
+	fn migrate(&self, source: &Database, config: &Config, destination: &mut Database) -> Result<(), Error>;
+}
+
+/// A simple migration over key-value pairs.
+pub trait SimpleMigration: 'static {
 	/// Version of database after the migration.
 	fn version(&self) -> u32;
 	/// Should migrate existing object to new database.
@@ -35,8 +74,152 @@ pub trait Migration: 'static {
 	fn simple_migrate(&self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
 }
 
-/// Migration destination.
-pub trait Destination {
-	/// Called on destination to commit batch of migrated entries.
-	fn commit(&mut self, batch: BTreeMap<Vec<u8>, Vec<u8>>) -> Result<(), Error>;
+impl<T: SimpleMigration> Migration for T {
+	fn version(&self) -> u32 { SimpleMigration::version(self) }
+
+	fn migrate(&self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> {
+		let mut batch: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
+
+		for (key, value) in source.iter() {
+			if let Some((key, value)) = self.simple_migrate(key.to_vec(), value.to_vec()) {
+				batch.insert(key, value);
+			}
+
+			if batch.len() == config.batch_size {
+				try!(commit_batch(dest, &batch));
+				batch.clear();
+			}
+		}
+
+		if batch.len() != 0 {
+			try!(commit_batch(dest, &batch));
+		}
+
+		Ok(())
+	}
 }
 
+/// Commit a batch of writes to a database.
+pub fn commit_batch(db: &mut Database, batch: &BTreeMap<Vec<u8>, Vec<u8>>) -> Result<(), Error> {
+	let transaction = DBTransaction::new();
+
+	for keypair in batch {
+		try!(transaction.put(&keypair.0, &keypair.1).map_err(Error::Custom));
+	}
+
+	db.write(transaction).map_err(Error::Custom)
+}
+
+/// Get the path where all databases reside.
+fn database_path(path: &Path) -> PathBuf {
+	let mut temp_path = path.to_owned();
+	temp_path.pop();
+	temp_path
+}
+
+enum TempIndex {
+	One,
+	Two,
+}
+
+impl TempIndex {
+	fn swap(&mut self) {
+		match *self {
+			TempIndex::One => *self = TempIndex::Two,
+			TempIndex::Two => *self = TempIndex::One,
+		}
+	}
+
+	// given the path to the old database, get the path of this one.
+	fn path(&self, db_root: &Path) -> PathBuf {
+		let mut buf = db_root.to_owned();
+
+		match *self {
+			TempIndex::One => buf.push("temp_migration_1"),
+			TempIndex::Two => buf.push("temp_migration_2"),
+		};
+
+		buf
+	}
+}
+
+/// Manages database migration.
+pub struct Manager {
+	config: Config,
+	migrations: Vec<Box<Migration>>,
+}
+
+impl Manager {
+	/// Creates new migration manager with given configuration.
+	pub fn new(config: Config) -> Self {
+		Manager {
+			config: config,
+			migrations: vec![],
+		}
+	}
+
+	/// Adds new migration rules.
+	pub fn add_migration<T>(&mut self, migration: T) -> Result<(), Error> where T: Migration {
+		let version_match = match self.migrations.last() {
+			Some(last) => last.version() + 1 == migration.version(),
+			None => true,
+		};
+
+		match version_match {
+			true => Ok(self.migrations.push(Box::new(migration))),
+			false => Err(Error::CannotAddMigration),
+		}
+	}
+
+	/// Performs migration in order, starting with a source path, migrating between two temporary databases,
+	/// and producing a path where the final migration lives.
+	pub fn execute(&self, old_path: &Path, version: u32) -> Result<PathBuf, Error> {
+		let migrations = try!(self.migrations_from(version).ok_or(Error::MigrationImpossible));
+		let db_config = DatabaseConfig {
+			prefix_size: None,
+			max_open_files: 64,
+			cache_size: None,
+			compaction: CompactionProfile::default(),
+		};
+
+		let db_root = database_path(old_path);
+		let mut temp_idx = TempIndex::One;
+		let mut temp_path = temp_idx.path(&db_root);
+
+		// start with the old db.
+		let old_path_str = try!(old_path.to_str().ok_or(Error::MigrationImpossible));
+		let mut cur_db = try!(Database::open(&db_config, old_path_str).map_err(|s| Error::Custom(s)));
+		for migration in migrations {
+			// open the target temporary database.
+			temp_path = temp_idx.path(&db_root);
+			let temp_path_str = try!(temp_path.to_str().ok_or(Error::MigrationImpossible));
+			let mut new_db = try!(Database::open(&db_config, temp_path_str).map_err(|s| Error::Custom(s)));
+
+			// perform the migration from cur_db to new_db.
+			try!(migration.migrate(&cur_db, &self.config, &mut new_db));
+			// next iteration, we will migrate from this db into the other temp.
+			cur_db = new_db;
+			temp_idx.swap();
+
+			// remove the other temporary migration database.
+			let _ = fs::remove_dir_all(temp_idx.path(&db_root));
+		}
+		Ok(temp_path)
+	}
+
+	/// Returns true if migration is needed.
+	pub fn is_needed(&self, version: u32) -> bool {
+		match self.migrations.last() {
+			Some(last) => version < last.version(),
+			None => false,
+		}
+	}
+
+	fn migrations_from(&self, version: u32) -> Option<&[Box<Migration>]> {
+		// index of the first required migration
+		let position = self.migrations.iter().position(|m| m.version() == version + 1);
+		position.map(|p| &self.migrations[p..])
+	}
+}
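The replacement `execute` works on whole databases rather than iterators: each migration reads the previous step's output database and writes a fresh one, ping-ponging between two scratch directories so that at most one stale copy exists at a time. A dependency-free sketch of that index logic, mirroring `TempIndex` above (the `/tmp/dbs` root is only for illustration):

use std::path::{Path, PathBuf};

// Mirrors `TempIndex`: selects which of the two scratch databases
// the next migration writes into.
enum Scratch { One, Two }

impl Scratch {
	fn swap(&mut self) {
		*self = match *self { Scratch::One => Scratch::Two, Scratch::Two => Scratch::One };
	}

	fn path(&self, root: &Path) -> PathBuf {
		root.join(match *self {
			Scratch::One => "temp_migration_1",
			Scratch::Two => "temp_migration_2",
		})
	}
}

fn main() {
	let root = Path::new("/tmp/dbs");
	let mut idx = Scratch::One;

	// Three chained migrations bounce between the two directories.
	for step in 0..3 {
		let target = idx.path(root);
		println!("migration {} writes into {}", step, target.display());
		idx.swap(); // the old target becomes the next step's source
	}
}

Bouncing between two fixed directories bounds scratch disk usage at roughly one extra copy of the database, no matter how many versions the migration chain spans.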
@@ -14,19 +14,50 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::BTreeMap;
-use migration::{Error, Destination, Migration, Manager, Config};
+//! Tests for migrations.
+//! A random temp directory is created. A database is created within it, and migrations
+//! are performed in temp sub-directories.
 
-impl Destination for BTreeMap<Vec<u8>, Vec<u8>> {
-	fn commit(&mut self, batch: BTreeMap<Vec<u8>, Vec<u8>>) -> Result<(), Error> {
-		self.extend(batch);
-		Ok(())
+use common::*;
+use migration::{Config, SimpleMigration, Manager};
+use kvdb::{Database, DBTransaction};
+use devtools::RandomTempPath;
+use std::path::PathBuf;
+
+fn db_path(path: &Path) -> PathBuf {
+	let mut p = path.to_owned();
+	p.push("db");
+	p
+}
+
+// initialize a database at the given directory with the given values.
+fn make_db(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
+	let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database");
+	{
+		let transaction = DBTransaction::new();
+		for (k, v) in pairs {
+			transaction.put(&k, &v).expect("failed to add pair to transaction");
+		}
+
+		db.write(transaction).expect("failed to write db transaction");
+	}
+}
+
+// helper for verifying a migrated database.
+fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
+	let db = Database::open_default(path.to_str().unwrap()).unwrap();
+
+	for (k, v) in pairs {
+		let x = db.get(&k).unwrap().unwrap();
+
+		assert_eq!(&x[..], &v[..]);
 	}
 }
 
 struct Migration0;
 
-impl Migration for Migration0 {
+impl SimpleMigration for Migration0 {
 	fn version(&self) -> u32 {
 		1
 	}
@@ -42,7 +73,7 @@ impl Migration for Migration0 {
 
 struct Migration1;
 
-impl Migration for Migration1 {
+impl SimpleMigration for Migration1 {
 	fn version(&self) -> u32 {
 		2
 	}
@@ -54,68 +85,58 @@ impl Migration for Migration1 {
 
 #[test]
 fn one_simple_migration() {
+	let dir = RandomTempPath::create_dir();
+	let db_path = db_path(dir.as_path());
 	let mut manager = Manager::new(Config::default());
-	let keys = vec![vec![], vec![1u8]];
-	let values = vec![vec![], vec![1u8]];
-	let db = keys.into_iter().zip(values.into_iter());
-
-	let expected_keys = vec![vec![0x11u8], vec![1, 0x11]];
-	let expected_values = vec![vec![0x22u8], vec![1, 0x22]];
-	let expected_db = expected_keys.into_iter().zip(expected_values.into_iter()).collect::<BTreeMap<_, _>>();
-
-	let mut result = BTreeMap::new();
+	make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]);
+	let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]];
+
 	manager.add_migration(Migration0).unwrap();
-	manager.execute(db, 0, &mut result).unwrap();
-	assert_eq!(expected_db, result);
+	let end_path = manager.execute(&db_path, 0).unwrap();
+
+	verify_migration(&end_path, expected);
 }
 
 #[test]
 #[should_panic]
 fn no_migration_needed() {
+	let dir = RandomTempPath::create_dir();
+	let db_path = db_path(dir.as_path());
 	let mut manager = Manager::new(Config::default());
-	let keys = vec![vec![], vec![1u8]];
-	let values = vec![vec![], vec![1u8]];
-	let db = keys.into_iter().zip(values.into_iter());
-
-	let mut result = BTreeMap::new();
+	make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]);
+
 	manager.add_migration(Migration0).unwrap();
-	manager.execute(db, 1, &mut result).unwrap();
+	manager.execute(&db_path, 1).unwrap();
 }
 
 #[test]
 fn multiple_migrations() {
+	let dir = RandomTempPath::create_dir();
+	let db_path = db_path(dir.as_path());
 	let mut manager = Manager::new(Config::default());
-	let keys = vec![vec![], vec![1u8]];
-	let values = vec![vec![], vec![1u8]];
-	let db = keys.into_iter().zip(values.into_iter());
-
-	let expected_keys = vec![vec![0x11u8], vec![1, 0x11]];
-	let expected_values = vec![vec![], vec![]];
-	let expected_db = expected_keys.into_iter().zip(expected_values.into_iter()).collect::<BTreeMap<_, _>>();
-
-	let mut result = BTreeMap::new();
+	make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]);
+	let expected = map![vec![0x11] => vec![], vec![1, 0x11] => vec![]];
+
 	manager.add_migration(Migration0).unwrap();
 	manager.add_migration(Migration1).unwrap();
-	manager.execute(db, 0, &mut result).unwrap();
-	assert_eq!(expected_db, result);
+	let end_path = manager.execute(&db_path, 0).unwrap();
+
+	verify_migration(&end_path, expected);
}
 
 #[test]
 fn second_migration() {
+	let dir = RandomTempPath::create_dir();
+	let db_path = db_path(dir.as_path());
 	let mut manager = Manager::new(Config::default());
-	let keys = vec![vec![], vec![1u8]];
-	let values = vec![vec![], vec![1u8]];
-	let db = keys.into_iter().zip(values.into_iter());
-
-	let expected_keys = vec![vec![], vec![1u8]];
-	let expected_values = vec![vec![], vec![]];
-	let expected_db = expected_keys.into_iter().zip(expected_values.into_iter()).collect::<BTreeMap<_, _>>();
-
-	let mut result = BTreeMap::new();
+	make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]);
+	let expected = map![vec![] => vec![], vec![1] => vec![]];
+
 	manager.add_migration(Migration0).unwrap();
 	manager.add_migration(Migration1).unwrap();
-	manager.execute(db, 1, &mut result).unwrap();
-	assert_eq!(expected_db, result);
+	let end_path = manager.execute(&db_path, 1).unwrap();
+
+	verify_migration(&end_path, expected);
 }
 
 #[test]
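The tests lean on a `map!` macro pulled in via `use common::*;`, which is not shown in this diff. A plausible stand-in, assuming it simply builds a `BTreeMap` from `key => value` pairs:

use std::collections::BTreeMap;

// Hypothetical stand-in for the `map!` macro the tests import from `common::*`:
// builds a BTreeMap from `key => value` pairs.
macro_rules! map {
	( $( $k:expr => $v:expr ),* ) => {{
		let mut m = BTreeMap::new();
		$( m.insert($k, $v); )*
		m
	}};
}

fn main() {
	let expected: BTreeMap<Vec<u8>, Vec<u8>> = map![vec![0x11] => vec![0x22]];
	assert_eq!(expected.get(&vec![0x11u8]), Some(&vec![0x22u8]));
}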