block cleanup (#9117)
* blockchain insert expects an owned block instead of a block reference
* reduce the number of times a block is deserialized
* removed cached uncle_bytes from block
* removed is_finalized from OpenBlock
* removed unused parity_machine::WithMetadata trait
* removed commented out code
* removed unused metadata from block
* BlockDetails extras may have at most 5 elements
parent a809621f63
commit c54beba932
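In practice the headline change is the first bullet: the blockchain insert paths now take an owned `encoded::Block` rather than a `&[u8]` slice that had to be re-parsed internally. A rough sketch of the new calling convention, modelled on the updated test helper `insert_block_batch` further down in this diff (`bc`, `batch`, `raw_rlp` and `receipts` are placeholders, not names from the patch):

// Wrap the raw block RLP once, then hand ownership to the blockchain.
// Note that ExtrasInsert no longer carries a `metadata` field.
let block = encoded::Block::new(raw_rlp);   // raw_rlp: Vec<u8> of block RLP
bc.insert_block(&mut batch, block, receipts, ExtrasInsert {
    fork_choice: ::engines::ForkChoice::New,
    is_finalized: false,
});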
@@ -116,10 +116,6 @@ pub struct ExecutedBlock {
 pub traces: Tracing,
 /// Hashes of last 256 blocks.
 pub last_hashes: Arc<LastHashes>,
-/// Finalization flag.
-pub is_finalized: bool,
-/// Block metadata.
-pub metadata: Option<Vec<u8>>,
 }
 
 impl ExecutedBlock {
@@ -138,8 +134,6 @@ impl ExecutedBlock {
 Tracing::Disabled
 },
 last_hashes: last_hashes,
-is_finalized: false,
-metadata: None,
 }
 }
 
@@ -228,26 +222,6 @@ impl ::parity_machine::Transactions for ExecutedBlock {
 }
 }
 
-impl ::parity_machine::Finalizable for ExecutedBlock {
-fn is_finalized(&self) -> bool {
-self.is_finalized
-}
-
-fn mark_finalized(&mut self) {
-self.is_finalized = true;
-}
-}
-
-impl ::parity_machine::WithMetadata for ExecutedBlock {
-fn metadata(&self) -> Option<&[u8]> {
-self.metadata.as_ref().map(|v| v.as_ref())
-}
-
-fn set_metadata(&mut self, value: Option<Vec<u8>>) {
-self.metadata = value;
-}
-}
-
 /// Block that is ready for transactions to be added.
 ///
 /// It's a bit like a Vec<Transaction>, except that whenever a transaction is pushed, we execute it and
@@ -264,10 +238,7 @@ pub struct OpenBlock<'x> {
 #[derive(Clone)]
 pub struct ClosedBlock {
 block: ExecutedBlock,
-uncle_bytes: Bytes,
 unclosed_state: State<StateDB>,
-unclosed_finalization_state: bool,
-unclosed_metadata: Option<Vec<u8>>,
 }
 
 /// Just like `ClosedBlock` except that we can't reopen it and it's faster.
@@ -276,7 +247,6 @@ pub struct ClosedBlock {
 #[derive(Clone)]
 pub struct LockedBlock {
 block: ExecutedBlock,
-uncle_bytes: Bytes,
 }
 
 /// A block that has a valid seal.
@@ -284,7 +254,6 @@ pub struct LockedBlock {
 /// The block's header has valid seal arguments. The block cannot be reversed into a `ClosedBlock` or `OpenBlock`.
 pub struct SealedBlock {
 block: ExecutedBlock,
-uncle_bytes: Bytes,
 }
 
 impl<'x> OpenBlock<'x> {
@@ -432,14 +401,12 @@ impl<'x> OpenBlock<'x> {
 let mut s = self;
 
 let unclosed_state = s.block.state.clone();
-let unclosed_metadata = s.block.metadata.clone();
-let unclosed_finalization_state = s.block.is_finalized;
 
 s.engine.on_close_block(&mut s.block)?;
 s.block.state.commit()?;
 
 s.block.header.set_transactions_root(ordered_trie_root(s.block.transactions.iter().map(|e| e.rlp_bytes())));
-let uncle_bytes = encode_list(&s.block.uncles).into_vec();
+let uncle_bytes = encode_list(&s.block.uncles);
 s.block.header.set_uncles_hash(keccak(&uncle_bytes));
 s.block.header.set_state_root(s.block.state.root().clone());
 s.block.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes())));
@@ -451,10 +418,7 @@ impl<'x> OpenBlock<'x> {
 
 Ok(ClosedBlock {
 block: s.block,
-uncle_bytes,
 unclosed_state,
-unclosed_metadata,
-unclosed_finalization_state,
 })
 }
 
@@ -468,8 +432,8 @@ impl<'x> OpenBlock<'x> {
 if s.block.header.transactions_root().is_zero() || s.block.header.transactions_root() == &KECCAK_NULL_RLP {
 s.block.header.set_transactions_root(ordered_trie_root(s.block.transactions.iter().map(|e| e.rlp_bytes())));
 }
-let uncle_bytes = encode_list(&s.block.uncles).into_vec();
 if s.block.header.uncles_hash().is_zero() || s.block.header.uncles_hash() == &KECCAK_EMPTY_LIST_RLP {
+let uncle_bytes = encode_list(&s.block.uncles);
 s.block.header.set_uncles_hash(keccak(&uncle_bytes));
 }
 if s.block.header.receipts_root().is_zero() || s.block.header.receipts_root() == &KECCAK_NULL_RLP {
@@ -485,7 +449,6 @@ impl<'x> OpenBlock<'x> {
 
 Ok(LockedBlock {
 block: s.block,
-uncle_bytes,
 })
 }
 
@@ -514,7 +477,6 @@ impl ClosedBlock {
 pub fn lock(self) -> LockedBlock {
 LockedBlock {
 block: self.block,
-uncle_bytes: self.uncle_bytes,
 }
 }
 
@@ -523,8 +485,6 @@ impl ClosedBlock {
 // revert rewards (i.e. set state back at last transaction's state).
 let mut block = self.block;
 block.state = self.unclosed_state;
-block.metadata = self.unclosed_metadata;
-block.is_finalized = self.unclosed_finalization_state;
 OpenBlock {
 block: block,
 engine: engine,
@@ -533,7 +493,6 @@ impl ClosedBlock {
 }
 
 impl LockedBlock {
-
 /// Removes outcomes from receipts and updates the receipt root.
 ///
 /// This is done after the block is enacted for historical reasons.
@@ -566,7 +525,9 @@ impl LockedBlock {
 }
 s.block.header.set_seal(seal);
 s.block.header.compute_hash();
-Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes })
+Ok(SealedBlock {
+block: s.block
+})
 }
 
 /// Provide a valid seal in order to turn this into a `SealedBlock`.
@@ -584,7 +545,9 @@ impl LockedBlock {
 // TODO: passing state context to avoid engines owning it?
 match engine.verify_local_seal(&s.block.header) {
 Err(e) => Err((e, s)),
-_ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }),
+_ => Ok(SealedBlock {
+block: s.block
+}),
 }
 }
 }
@@ -601,7 +564,7 @@ impl SealedBlock {
 let mut block_rlp = RlpStream::new_list(3);
 block_rlp.append(&self.block.header);
 block_rlp.append_list(&self.block.transactions);
-block_rlp.append_raw(&self.uncle_bytes, 1);
+block_rlp.append_list(&self.block.uncles);
 block_rlp.out()
 }
 }
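A note on the removed `uncle_bytes` cache above: the uncles hash written in `close()` and the body assembled in `rlp_bytes()` stay consistent without it, because both are now derived from `block.uncles` on demand. A minimal sketch of that relationship, assuming (as the code above does) that `encode_list` and `RlpStream::append_list` produce the same RLP list encoding, and given some executed `block` with `header`, `transactions` and `uncles` fields:

// Conceptual check, not part of the patch.
let uncle_bytes = encode_list(&block.uncles);   // list RLP of the uncles
let uncles_hash = keccak(&uncle_bytes);         // what close() writes into the header

let mut block_rlp = RlpStream::new_list(3);
block_rlp.append(&block.header);
block_rlp.append_list(&block.transactions);
block_rlp.append_list(&block.uncles);           // third item re-encodes the same uncle list
let full_block = block_rlp.out();               // so the header's uncles_hash still matches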
@@ -450,9 +450,7 @@ impl<'a> Iterator for AncestryWithMetadataIter<'a> {
 Some(ExtendedHeader {
 parent_total_difficulty: details.total_difficulty - *header.difficulty(),
 is_finalized: details.is_finalized,
-metadata: details.metadata,
-
-header: header,
+header,
 })
 },
 _ => {
@@ -555,7 +553,6 @@ impl BlockChain {
 parent: header.parent_hash(),
 children: vec![],
 is_finalized: false,
-metadata: None,
 };
 
 let mut batch = DBTransaction::new();
@@ -759,10 +756,11 @@ impl BlockChain {
 /// `parent_td` is a parent total diffuculty
 /// Supply a dummy parent total difficulty when the parent block may not be in the chain.
 /// Returns true if the block is disconnected.
-pub fn insert_unordered_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec<Receipt>, parent_td: Option<U256>, is_best: bool, is_ancient: bool) -> bool {
-let block = view!(BlockView, bytes);
-let header = block.header_view();
-let hash = header.hash();
+pub fn insert_unordered_block(&self, batch: &mut DBTransaction, block: encoded::Block, receipts: Vec<Receipt>, parent_td: Option<U256>, is_best: bool, is_ancient: bool) -> bool {
+let block_number = block.header_view().number();
+let block_parent_hash = block.header_view().parent_hash();
+let block_difficulty = block.header_view().difficulty();
+let hash = block.header_view().hash();
 
 if self.is_known(&hash) {
 return false;
@@ -770,45 +768,45 @@ impl BlockChain {
 
 assert!(self.pending_best_block.read().is_none());
 
-let compressed_header = compress(block.header_rlp().as_raw(), blocks_swapper());
-let compressed_body = compress(&Self::block_to_body(bytes), blocks_swapper());
+let compressed_header = compress(block.header_view().rlp().as_raw(), blocks_swapper());
+let compressed_body = compress(&Self::block_to_body(block.raw()), blocks_swapper());
 
 // store block in db
 batch.put(db::COL_HEADERS, &hash, &compressed_header);
 batch.put(db::COL_BODIES, &hash, &compressed_body);
 
-let maybe_parent = self.block_details(&header.parent_hash());
+let maybe_parent = self.block_details(&block_parent_hash);
 
 if let Some(parent_details) = maybe_parent {
 // parent known to be in chain.
 let info = BlockInfo {
 hash: hash,
-number: header.number(),
-total_difficulty: parent_details.total_difficulty + header.difficulty(),
+number: block_number,
+total_difficulty: parent_details.total_difficulty + block_difficulty,
 location: BlockLocation::CanonChain,
 };
 
 self.prepare_update(batch, ExtrasUpdate {
-block_hashes: self.prepare_block_hashes_update(bytes, &info),
-block_details: self.prepare_block_details_update(bytes, &info, false, None),
+block_hashes: self.prepare_block_hashes_update(&info),
+block_details: self.prepare_block_details_update(block_parent_hash, &info, false),
 block_receipts: self.prepare_block_receipts_update(receipts, &info),
-blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
-transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
+blocks_blooms: self.prepare_block_blooms_update(block.header_view().log_bloom(), &info),
+transactions_addresses: self.prepare_transaction_addresses_update(block.view().transaction_hashes(), &info),
 info: info,
-block: bytes
+block,
 }, is_best);
 
 if is_ancient {
 let mut best_ancient_block = self.best_ancient_block.write();
 let ancient_number = best_ancient_block.as_ref().map_or(0, |b| b.number);
-if self.block_hash(header.number() + 1).is_some() {
+if self.block_hash(block_number + 1).is_some() {
 batch.delete(db::COL_EXTRA, b"ancient");
 *best_ancient_block = None;
-} else if header.number() > ancient_number {
+} else if block_number > ancient_number {
 batch.put(db::COL_EXTRA, b"ancient", &hash);
 *best_ancient_block = Some(BestAncientBlock {
 hash: hash,
-number: header.number(),
+number: block_number,
 });
 }
 }
@@ -821,32 +819,31 @@ impl BlockChain {
 
 let info = BlockInfo {
 hash: hash,
-number: header.number(),
-total_difficulty: d + header.difficulty(),
+number: block_number,
+total_difficulty: d + block_difficulty,
 location: BlockLocation::CanonChain,
 };
 
 // TODO [sorpaas] support warp sync insertion of finalization and metadata.
 let block_details = BlockDetails {
-number: header.number(),
+number: block_number,
 total_difficulty: info.total_difficulty,
-parent: header.parent_hash(),
+parent: block_parent_hash,
 children: Vec::new(),
 is_finalized: false,
-metadata: None,
 };
 
 let mut update = HashMap::new();
 update.insert(hash, block_details);
 
 self.prepare_update(batch, ExtrasUpdate {
-block_hashes: self.prepare_block_hashes_update(bytes, &info),
+block_hashes: self.prepare_block_hashes_update(&info),
 block_details: update,
 block_receipts: self.prepare_block_receipts_update(receipts, &info),
-blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
-transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
+blocks_blooms: self.prepare_block_blooms_update(block.header_view().log_bloom(), &info),
+transactions_addresses: self.prepare_transaction_addresses_update(block.view().transaction_hashes(), &info),
 info: info,
-block: bytes,
+block,
 }, is_best);
 true
 }
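The unordered (ancient/warp) import path above changes in the same way: the caller hands over an owned `encoded::Block`, and the header fields the function needs are read once into locals up front. A sketch of the new call shape, taken from the updated test later in this file's diff (`bc`, `batch`, `b2` and `b1_total_difficulty` as in that test):

// Out-of-order import of an already-verified block; receipts are empty here,
// and the parent total difficulty is supplied because the parent may not be in the chain yet.
bc.insert_unordered_block(&mut batch, b2.last().encoded(), vec![], Some(b1_total_difficulty), false, false);
bc.commit();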
@@ -958,41 +955,36 @@ impl BlockChain {
 /// Inserts the block into backing cache database.
 /// Expects the block to be valid and already verified.
 /// If the block is already known, does nothing.
-pub fn insert_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec<Receipt>, extras: ExtrasInsert) -> ImportRoute {
-let block = view!(BlockView, bytes);
-let header = block.header_view();
-
-let parent_hash = header.parent_hash();
+pub fn insert_block(&self, batch: &mut DBTransaction, block: encoded::Block, receipts: Vec<Receipt>, extras: ExtrasInsert) -> ImportRoute {
+let parent_hash = block.header_view().parent_hash();
 let best_hash = self.best_block_hash();
 
 let route = self.tree_route(best_hash, parent_hash).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed");
 
-self.insert_block_with_route(batch, bytes, receipts, route, extras)
+self.insert_block_with_route(batch, block, receipts, route, extras)
 }
 
 /// Inserts the block into backing cache database with already generated route information.
 /// Expects the block to be valid and already verified and route is tree route information from current best block to new block's parent.
 /// If the block is already known, does nothing.
-pub fn insert_block_with_route(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec<Receipt>, route: TreeRoute, extras: ExtrasInsert) -> ImportRoute {
-// create views onto rlp
-let block = view!(BlockView, bytes);
-let header = block.header_view();
-let hash = header.hash();
+pub fn insert_block_with_route(&self, batch: &mut DBTransaction, block: encoded::Block, receipts: Vec<Receipt>, route: TreeRoute, extras: ExtrasInsert) -> ImportRoute {
+let hash = block.header_view().hash();
+let parent_hash = block.header_view().parent_hash();
 
-if self.is_known_child(&header.parent_hash(), &hash) {
+if self.is_known_child(&parent_hash, &hash) {
 return ImportRoute::none();
 }
 
 assert!(self.pending_best_block.read().is_none());
 
-let compressed_header = compress(block.header_rlp().as_raw(), blocks_swapper());
-let compressed_body = compress(&Self::block_to_body(bytes), blocks_swapper());
+let compressed_header = compress(block.header_view().rlp().as_raw(), blocks_swapper());
+let compressed_body = compress(&Self::block_to_body(block.raw()), blocks_swapper());
 
 // store block in db
 batch.put(db::COL_HEADERS, &hash, &compressed_header);
 batch.put(db::COL_BODIES, &hash, &compressed_body);
 
-let info = self.block_info(&header, route, &extras);
+let info = self.block_info(&block.header_view(), route, &extras);
 
 if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location {
 info!(target: "reorg", "Reorg to {} ({} {} {})",
@@ -1004,13 +996,13 @@ impl BlockChain {
 }
 
 self.prepare_update(batch, ExtrasUpdate {
-block_hashes: self.prepare_block_hashes_update(bytes, &info),
-block_details: self.prepare_block_details_update(bytes, &info, extras.is_finalized, extras.metadata),
+block_hashes: self.prepare_block_hashes_update(&info),
+block_details: self.prepare_block_details_update(parent_hash, &info, extras.is_finalized),
 block_receipts: self.prepare_block_receipts_update(receipts, &info),
-blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
-transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
+blocks_blooms: self.prepare_block_blooms_update(block.header_view().log_bloom(), &info),
+transactions_addresses: self.prepare_transaction_addresses_update(block.view().transaction_hashes(), &info),
 info: info.clone(),
-block: bytes,
+block,
 }, true);
 
 ImportRoute::from(info)
@@ -1090,11 +1082,10 @@ impl BlockChain {
 let mut best_block = self.pending_best_block.write();
 if is_best && update.info.location != BlockLocation::Branch {
 batch.put(db::COL_EXTRA, b"best", &update.info.hash);
-let block = encoded::Block::new(update.block.to_vec());
 *best_block = Some(BestBlock {
 total_difficulty: update.info.total_difficulty,
-header: block.decode_header(),
-block,
+header: update.block.decode_header(),
+block: update.block,
 });
 }
 
@@ -1214,16 +1205,13 @@ impl BlockChain {
 }
 
 /// This function returns modified block hashes.
-fn prepare_block_hashes_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<BlockNumber, H256> {
+fn prepare_block_hashes_update(&self, info: &BlockInfo) -> HashMap<BlockNumber, H256> {
 let mut block_hashes = HashMap::new();
-let block = view!(BlockView, block_bytes);
-let header = block.header_view();
-let number = header.number();
 
 match info.location {
 BlockLocation::Branch => (),
 BlockLocation::CanonChain => {
-block_hashes.insert(number, info.hash);
+block_hashes.insert(info.number, info.hash);
 },
 BlockLocation::BranchBecomingCanonChain(ref data) => {
 let ancestor_number = self.block_number(&data.ancestor).expect("Block number of ancestor is always in DB");
@@ -1233,7 +1221,7 @@ impl BlockChain {
 block_hashes.insert(start_number + index as BlockNumber, hash);
 }
 
-block_hashes.insert(number, info.hash);
+block_hashes.insert(info.number, info.hash);
 }
 }
 
@@ -1242,23 +1230,18 @@ impl BlockChain {
 
 /// This function returns modified block details.
 /// Uses the given parent details or attempts to load them from the database.
-fn prepare_block_details_update(&self, block_bytes: &[u8], info: &BlockInfo, is_finalized: bool, metadata: Option<Vec<u8>>) -> HashMap<H256, BlockDetails> {
-let block = view!(BlockView, block_bytes);
-let header = block.header_view();
-let parent_hash = header.parent_hash();
-
+fn prepare_block_details_update(&self, parent_hash: H256, info: &BlockInfo, is_finalized: bool) -> HashMap<H256, BlockDetails> {
 // update parent
 let mut parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
 parent_details.children.push(info.hash);
 
 // create current block details.
 let details = BlockDetails {
-number: header.number(),
+number: info.number,
 total_difficulty: info.total_difficulty,
 parent: parent_hash,
 children: vec![],
 is_finalized: is_finalized,
-metadata: metadata,
 };
 
 // write to batch
@@ -1276,10 +1259,7 @@ impl BlockChain {
 }
 
 /// This function returns modified transaction addresses.
-fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, Option<TransactionAddress>> {
-let block = view!(BlockView, block_bytes);
-let transaction_hashes = block.transaction_hashes();
-
+fn prepare_transaction_addresses_update(&self, transaction_hashes: Vec<H256>, info: &BlockInfo) -> HashMap<H256, Option<TransactionAddress>> {
 match info.location {
 BlockLocation::CanonChain => {
 transaction_hashes.into_iter()
@@ -1344,14 +1324,10 @@ impl BlockChain {
 /// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex)
 /// to bloom location in database (BlocksBloomLocation).
 ///
-fn prepare_block_blooms_update(&self, block_bytes: &[u8], info: &BlockInfo) -> Option<(u64, Vec<Bloom>)> {
-let block = view!(BlockView, block_bytes);
-let header = block.header_view();
-
+fn prepare_block_blooms_update(&self, log_bloom: Bloom, info: &BlockInfo) -> Option<(u64, Vec<Bloom>)> {
 match info.location {
 BlockLocation::Branch => None,
 BlockLocation::CanonChain => {
-let log_bloom = header.log_bloom();
 if log_bloom.is_zero() {
 None
 } else {
@@ -1369,7 +1345,7 @@ impl BlockChain {
 .map(|h| h.log_bloom())
 .collect();
 
-blooms.push(header.log_bloom());
+blooms.push(log_bloom);
 Some((start_number, blooms))
 }
 }
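One consequence of `ExtrasUpdate` owning the block is visible in the best-block cache update in the hunk at -1090 above: the old code had to copy the borrowed bytes into a fresh `encoded::Block` before caching, while the new code simply moves the owned block in. A before/after sketch, lifted from that hunk (only the comments are new):

// Before: a borrowed &[u8] forced an allocation and a byte copy at cache time:
//     let block = encoded::Block::new(update.block.to_vec());
// After: the owned encoded::Block is moved straight into the cache;
// only the header is decoded, and the raw bytes are never copied.
*best_block = Some(BestBlock {
    total_difficulty: update.info.total_difficulty,
    header: update.block.decode_header(),
    block: update.block,
});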
@@ -1505,18 +1481,19 @@ mod tests {
 use log_entry::{LogEntry, LocalizedLogEntry};
 use ethkey::Secret;
 use test_helpers::new_db;
+use encoded;
 
-fn new_chain(genesis: &[u8], db: Arc<BlockChainDB>) -> BlockChain {
-BlockChain::new(Config::default(), genesis, db)
+fn new_chain(genesis: encoded::Block, db: Arc<BlockChainDB>) -> BlockChain {
+BlockChain::new(Config::default(), genesis.raw(), db)
 }
 
-fn insert_block(db: &Arc<BlockChainDB>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
-insert_block_commit(db, bc, bytes, receipts, true)
+fn insert_block(db: &Arc<BlockChainDB>, bc: &BlockChain, block: encoded::Block, receipts: Vec<Receipt>) -> ImportRoute {
+insert_block_commit(db, bc, block, receipts, true)
 }
 
-fn insert_block_commit(db: &Arc<BlockChainDB>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>, commit: bool) -> ImportRoute {
+fn insert_block_commit(db: &Arc<BlockChainDB>, bc: &BlockChain, block: encoded::Block, receipts: Vec<Receipt>, commit: bool) -> ImportRoute {
 let mut batch = db.key_value().transaction();
-let res = insert_block_batch(&mut batch, bc, bytes, receipts);
+let res = insert_block_batch(&mut batch, bc, block, receipts);
 db.key_value().write(batch).unwrap();
 if commit {
 bc.commit();
@@ -1524,25 +1501,24 @@ mod tests {
 res
 }
 
-fn insert_block_batch(batch: &mut DBTransaction, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
-use views::BlockView;
+fn insert_block_batch(batch: &mut DBTransaction, bc: &BlockChain, block: encoded::Block, receipts: Vec<Receipt>) -> ImportRoute {
 use blockchain::ExtrasInsert;
 
-let block = view!(BlockView, bytes);
+let fork_choice = {
 let header = block.header_view();
 let parent_hash = header.parent_hash();
 let parent_details = bc.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
 let block_total_difficulty = parent_details.total_difficulty + header.difficulty();
-let fork_choice = if block_total_difficulty > bc.best_block_total_difficulty() {
+if block_total_difficulty > bc.best_block_total_difficulty() {
 ::engines::ForkChoice::New
 } else {
 ::engines::ForkChoice::Old
+}
 };
 
-bc.insert_block(batch, bytes, receipts, ExtrasInsert {
+bc.insert_block(batch, block, receipts, ExtrasInsert {
 fork_choice: fork_choice,
 is_finalized: false,
-metadata: None
 })
 }
 
@@ -1553,11 +1529,11 @@ mod tests {
 let first = genesis.add_block();
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 assert_eq!(bc.best_block_number(), 0);
 
 // when
-insert_block_commit(&db, &bc, &first.last().encoded(), vec![], false);
+insert_block_commit(&db, &bc, first.last().encoded(), vec![], false);
 assert_eq!(bc.best_block_number(), 0);
 bc.commit();
 // NOTE no db.write here (we want to check if best block is cached)
@@ -1578,7 +1554,7 @@ mod tests {
 let first_hash = first.hash();
 
 let db = new_db();
-let bc = new_chain(&genesis.encoded(), db.clone());
+let bc = new_chain(genesis.encoded(), db.clone());
 
 assert_eq!(bc.genesis_hash(), genesis_hash);
 assert_eq!(bc.best_block_hash(), genesis_hash);
@@ -1587,7 +1563,7 @@ mod tests {
 assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);
 
 let mut batch = db.key_value().transaction();
-insert_block_batch(&mut batch, &bc, &first.encoded(), vec![]);
+insert_block_batch(&mut batch, &bc, first.encoded(), vec![]);
 db.key_value().write(batch).unwrap();
 bc.commit();
 
@@ -1607,13 +1583,13 @@ mod tests {
 let generator = BlockGenerator::new(vec![first_10]);
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let mut block_hashes = vec![genesis.last().hash()];
 let mut batch = db.key_value().transaction();
 for block in generator {
 block_hashes.push(block.hash());
-insert_block_batch(&mut batch, &bc, &block.encoded(), vec![]);
+insert_block_batch(&mut batch, &bc, block.encoded(), vec![]);
 bc.commit();
 }
 db.key_value().write(batch).unwrap();
@@ -1651,10 +1627,10 @@ mod tests {
 );
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 for b in generator {
-insert_block(&db, &bc, &b.encoded(), vec![]);
+insert_block(&db, &bc, b.encoded(), vec![]);
 }
 
 assert_eq!(uncle_headers, bc.find_uncle_headers(&b4a_hash, 3).unwrap());
@@ -1687,12 +1663,12 @@ mod tests {
 let b2_hash = b2.last().hash();
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let mut batch = db.key_value().transaction();
-let _ = insert_block_batch(&mut batch, &bc, &b1a.last().encoded(), vec![]);
+let _ = insert_block_batch(&mut batch, &bc, b1a.last().encoded(), vec![]);
 bc.commit();
-let _ = insert_block_batch(&mut batch, &bc, &b1b.last().encoded(), vec![]);
+let _ = insert_block_batch(&mut batch, &bc, b1b.last().encoded(), vec![]);
 bc.commit();
 db.key_value().write(batch).unwrap();
 
@@ -1704,7 +1680,7 @@ mod tests {
 
 // now let's make forked chain the canon chain
 let mut batch = db.key_value().transaction();
-let _ = insert_block_batch(&mut batch, &bc, &b2.last().encoded(), vec![]);
+let _ = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]);
 bc.commit();
 db.key_value().write(batch).unwrap();
 
@@ -1762,12 +1738,12 @@ mod tests {
 let t3_hash = t3.hash();
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let mut batch = db.key_value().transaction();
-let _ = insert_block_batch(&mut batch, &bc, &b1a.last().encoded(), vec![]);
+let _ = insert_block_batch(&mut batch, &bc, b1a.last().encoded(), vec![]);
 bc.commit();
-let _ = insert_block_batch(&mut batch, &bc, &b1b.last().encoded(), vec![]);
+let _ = insert_block_batch(&mut batch, &bc, b1b.last().encoded(), vec![]);
 bc.commit();
 db.key_value().write(batch).unwrap();
 
@@ -1783,7 +1759,7 @@ mod tests {
 
 // now let's make forked chain the canon chain
 let mut batch = db.key_value().transaction();
-let _ = insert_block_batch(&mut batch, &bc, &b2.last().encoded(), vec![]);
+let _ = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]);
 bc.commit();
 db.key_value().write(batch).unwrap();
 
@@ -1820,19 +1796,19 @@ mod tests {
 let best_block_hash = b3a_hash;
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let mut batch = db.key_value().transaction();
-let ir1 = insert_block_batch(&mut batch, &bc, &b1.last().encoded(), vec![]);
+let ir1 = insert_block_batch(&mut batch, &bc, b1.last().encoded(), vec![]);
 bc.commit();
-let ir2 = insert_block_batch(&mut batch, &bc, &b2.last().encoded(), vec![]);
+let ir2 = insert_block_batch(&mut batch, &bc, b2.last().encoded(), vec![]);
 bc.commit();
-let ir3b = insert_block_batch(&mut batch, &bc, &b3b.last().encoded(), vec![]);
+let ir3b = insert_block_batch(&mut batch, &bc, b3b.last().encoded(), vec![]);
 bc.commit();
 db.key_value().write(batch).unwrap();
 assert_eq!(bc.block_hash(3).unwrap(), b3b_hash);
 let mut batch = db.key_value().transaction();
-let ir3a = insert_block_batch(&mut batch, &bc, &b3a.last().encoded(), vec![]);
+let ir3a = insert_block_batch(&mut batch, &bc, b3a.last().encoded(), vec![]);
 bc.commit();
 db.key_value().write(batch).unwrap();
 
@@ -1934,17 +1910,17 @@ mod tests {
 let db = new_db();
 
 {
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 assert_eq!(bc.best_block_hash(), genesis_hash);
 let mut batch = db.key_value().transaction();
-insert_block_batch(&mut batch, &bc, &first.last().encoded(), vec![]);
+insert_block_batch(&mut batch, &bc, first.last().encoded(), vec![]);
 db.key_value().write(batch).unwrap();
 bc.commit();
 assert_eq!(bc.best_block_hash(), first_hash);
 }
 
 {
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 assert_eq!(bc.best_block_hash(), first_hash);
 }
@@ -1994,9 +1970,9 @@ mod tests {
 let b1_hash: H256 = "f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3".into();
 
 let db = new_db();
-let bc = new_chain(&genesis, db.clone());
+let bc = new_chain(encoded::Block::new(genesis), db.clone());
 let mut batch = db.key_value().transaction();
-insert_block_batch(&mut batch, &bc, &b1, vec![]);
+insert_block_batch(&mut batch, &bc, encoded::Block::new(b1), vec![]);
 db.key_value().write(batch).unwrap();
 bc.commit();
 
@@ -2062,8 +2038,8 @@ mod tests {
 let b3_number = b3.last().number();
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
-insert_block(&db, &bc, &b1.last().encoded(), vec![Receipt {
+let bc = new_chain(genesis.last().encoded(), db.clone());
+insert_block(&db, &bc, b1.last().encoded(), vec![Receipt {
 outcome: TransactionOutcome::StateRoot(H256::default()),
 gas_used: 10_000.into(),
 log_bloom: Default::default(),
@@ -2080,7 +2056,7 @@ mod tests {
 LogEntry { address: Default::default(), topics: vec![], data: vec![3], },
 ],
 }]);
-insert_block(&db, &bc, &b2.last().encoded(), vec![
+insert_block(&db, &bc, b2.last().encoded(), vec![
 Receipt {
 outcome: TransactionOutcome::StateRoot(H256::default()),
 gas_used: 10_000.into(),
@@ -2090,7 +2066,7 @@ mod tests {
 ],
 }
 ]);
-insert_block(&db, &bc, &b3.last().encoded(), vec![
+insert_block(&db, &bc, b3.last().encoded(), vec![
 Receipt {
 outcome: TransactionOutcome::StateRoot(H256::default()),
 gas_used: 10_000.into(),
@@ -2190,27 +2166,27 @@ mod tests {
 let b2a = b1a.add_block_with_bloom(bloom_ba);
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5);
 let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5);
 assert!(blocks_b1.is_empty());
 assert!(blocks_b2.is_empty());
 
-insert_block(&db, &bc, &b1.last().encoded(), vec![]);
+insert_block(&db, &bc, b1.last().encoded(), vec![]);
 let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5);
 let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5);
 assert_eq!(blocks_b1, vec![1]);
 assert!(blocks_b2.is_empty());
 
-insert_block(&db, &bc, &b2.last().encoded(), vec![]);
+insert_block(&db, &bc, b2.last().encoded(), vec![]);
 let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5);
 let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5);
 assert_eq!(blocks_b1, vec![1]);
 assert_eq!(blocks_b2, vec![2]);
 
 // hasn't been forked yet
-insert_block(&db, &bc, &b1a.last().encoded(), vec![]);
+insert_block(&db, &bc, b1a.last().encoded(), vec![]);
 let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5);
 let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5);
 let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5);
@@ -2219,7 +2195,7 @@ mod tests {
 assert!(blocks_ba.is_empty());
 
 // fork has happend
-insert_block(&db, &bc, &b2a.last().encoded(), vec![]);
+insert_block(&db, &bc, b2a.last().encoded(), vec![]);
 let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5);
 let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5);
 let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5);
@@ -2228,7 +2204,7 @@ mod tests {
 assert_eq!(blocks_ba, vec![1, 2]);
 
 // fork back
-insert_block(&db, &bc, &b3.last().encoded(), vec![]);
+insert_block(&db, &bc, b3.last().encoded(), vec![]);
 let blocks_b1 = bc.blocks_with_bloom(Some(&bloom_b1), 0, 5);
 let blocks_b2 = bc.blocks_with_bloom(Some(&bloom_b2), 0, 5);
 let blocks_ba = bc.blocks_with_bloom(Some(&bloom_ba), 0, 5);
@@ -2252,13 +2228,13 @@ mod tests {
 let b1_total_difficulty = genesis.last().difficulty() + b1.last().difficulty();
 
 let db = new_db();
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 let mut batch = db.key_value().transaction();
-bc.insert_unordered_block(&mut batch, &b2.last().encoded(), vec![], Some(b1_total_difficulty), false, false);
+bc.insert_unordered_block(&mut batch, b2.last().encoded(), vec![], Some(b1_total_difficulty), false, false);
 bc.commit();
-bc.insert_unordered_block(&mut batch, &b3.last().encoded(), vec![], None, true, false);
+bc.insert_unordered_block(&mut batch, b3.last().encoded(), vec![], None, true, false);
 bc.commit();
-bc.insert_unordered_block(&mut batch, &b1.last().encoded(), vec![], None, false, false);
+bc.insert_unordered_block(&mut batch, b1.last().encoded(), vec![], None, false, false);
 bc.commit();
 db.key_value().write(batch).unwrap();
 
@@ -2285,23 +2261,23 @@ mod tests {
 
 let db = new_db();
 {
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let mut batch = db.key_value().transaction();
 // create a longer fork
 for block in generator {
-insert_block_batch(&mut batch, &bc, &block.encoded(), vec![]);
+insert_block_batch(&mut batch, &bc, block.encoded(), vec![]);
 bc.commit();
 }
 
 assert_eq!(bc.best_block_number(), 5);
-insert_block_batch(&mut batch, &bc, &uncle.last().encoded(), vec![]);
+insert_block_batch(&mut batch, &bc, uncle.last().encoded(), vec![]);
 db.key_value().write(batch).unwrap();
 bc.commit();
 }
 
 // re-loading the blockchain should load the correct best block.
-let bc = new_chain(&genesis.last().encoded(), db);
+let bc = new_chain(genesis.last().encoded(), db);
 assert_eq!(bc.best_block_number(), 5);
 }
 
@@ -2316,13 +2292,13 @@ mod tests {
 
 let db = new_db();
 {
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let mut batch = db.key_value().transaction();
 // create a longer fork
 for (i, block) in generator.into_iter().enumerate() {
 
-insert_block_batch(&mut batch, &bc, &block.encoded(), vec![]);
+insert_block_batch(&mut batch, &bc, block.encoded(), vec![]);
 bc.insert_epoch_transition(&mut batch, i as u64, EpochTransition {
 block_hash: block.hash(),
 block_number: i as u64 + 1,
@@ -2333,7 +2309,7 @@ mod tests {
 
 assert_eq!(bc.best_block_number(), 5);
 
-insert_block_batch(&mut batch, &bc, &uncle.last().encoded(), vec![]);
+insert_block_batch(&mut batch, &bc, uncle.last().encoded(), vec![]);
 bc.insert_epoch_transition(&mut batch, 999, EpochTransition {
 block_hash: uncle.last().hash(),
 block_number: 1,
@@ -2348,7 +2324,7 @@ mod tests {
 }
 
 // re-loading the blockchain should load the correct best block.
-let bc = new_chain(&genesis.last().encoded(), db);
+let bc = new_chain(genesis.last().encoded(), db);
 
 assert_eq!(bc.best_block_number(), 5);
 assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
@@ -2369,7 +2345,7 @@ mod tests {
 
 let db = new_db();
 
-let bc = new_chain(&genesis.last().encoded(), db.clone());
+let bc = new_chain(genesis.last().encoded(), db.clone());
 
 let mut batch = db.key_value().transaction();
 bc.insert_epoch_transition(&mut batch, 0, EpochTransition {
@@ -2383,7 +2359,7 @@ mod tests {
 // and a non-canonical fork of 8 from genesis.
 let fork_hash = {
 for block in fork_generator {
-insert_block(&db, &bc, &block.encoded(), vec![]);
+insert_block(&db, &bc, block.encoded(), vec![]);
 }
 
 assert_eq!(bc.best_block_number(), 7);
@@ -2391,7 +2367,7 @@ mod tests {
 };
 
 for block in next_generator {
-insert_block(&db, &bc, &block.encoded(), vec![]);
+insert_block(&db, &bc, block.encoded(), vec![]);
 }
 
 assert_eq!(bc.best_block_number(), 10);
@ -152,17 +152,15 @@ pub struct BlockDetails {
|
|||||||
pub children: Vec<H256>,
|
pub children: Vec<H256>,
|
||||||
/// Whether the block is considered finalized
|
/// Whether the block is considered finalized
|
||||||
pub is_finalized: bool,
|
pub is_finalized: bool,
|
||||||
/// Additional block metadata
|
|
||||||
pub metadata: Option<Vec<u8>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl rlp::Encodable for BlockDetails {
|
impl rlp::Encodable for BlockDetails {
|
||||||
fn rlp_append(&self, stream: &mut rlp::RlpStream) {
|
fn rlp_append(&self, stream: &mut rlp::RlpStream) {
|
||||||
let use_short_version = self.metadata.is_none() && !self.is_finalized;
|
let use_short_version = !self.is_finalized;
|
||||||
|
|
||||||
match use_short_version {
|
match use_short_version {
|
||||||
true => { stream.begin_list(4); },
|
true => { stream.begin_list(4); },
|
||||||
false => { stream.begin_list(6); },
|
false => { stream.begin_list(5); },
|
||||||
}
|
}
|
||||||
|
|
||||||
stream.append(&self.number);
|
stream.append(&self.number);
|
||||||
@ -171,7 +169,6 @@ impl rlp::Encodable for BlockDetails {
|
|||||||
stream.append_list(&self.children);
|
stream.append_list(&self.children);
|
||||||
if !use_short_version {
|
if !use_short_version {
|
||||||
stream.append(&self.is_finalized);
|
stream.append(&self.is_finalized);
|
||||||
stream.append(&self.metadata);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -180,7 +177,7 @@ impl rlp::Decodable for BlockDetails {
|
|||||||
fn decode(rlp: &rlp::Rlp) -> Result<Self, rlp::DecoderError> {
|
fn decode(rlp: &rlp::Rlp) -> Result<Self, rlp::DecoderError> {
|
||||||
let use_short_version = match rlp.item_count()? {
|
let use_short_version = match rlp.item_count()? {
|
||||||
4 => true,
|
4 => true,
|
||||||
6 => false,
|
5 => false,
|
||||||
_ => return Err(rlp::DecoderError::RlpIncorrectListLen),
|
_ => return Err(rlp::DecoderError::RlpIncorrectListLen),
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -194,11 +191,6 @@ impl rlp::Decodable for BlockDetails {
|
|||||||
} else {
|
} else {
|
||||||
rlp.val_at(4)?
|
rlp.val_at(4)?
|
||||||
},
|
},
|
||||||
metadata: if use_short_version {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
rlp.val_at(5)?
|
|
||||||
},
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
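With `metadata` removed, a `BlockDetails` record encodes to at most 5 RLP items: the 4-item short form when the block is not finalized, plus a trailing boolean when it is. A sketch of the resulting round trip, assuming the usual field order (number, total difficulty, parent, children, is_finalized):

impl rlp::Encodable for BlockDetails {
    fn rlp_append(&self, stream: &mut rlp::RlpStream) {
        // the 4-item short form stays compatible with extras written before finalization tracking
        let use_short_version = !self.is_finalized;
        stream.begin_list(if use_short_version { 4 } else { 5 });
        stream.append(&self.number);
        stream.append(&self.total_difficulty);
        stream.append(&self.parent);
        stream.append_list(&self.children);
        if !use_short_version {
            stream.append(&self.is_finalized);
        }
    }
}

impl rlp::Decodable for BlockDetails {
    fn decode(rlp: &rlp::Rlp) -> Result<Self, rlp::DecoderError> {
        let use_short_version = match rlp.item_count()? {
            4 => true,
            5 => false,
            _ => return Err(rlp::DecoderError::RlpIncorrectListLen),
        };
        Ok(BlockDetails {
            number: rlp.val_at(0)?,
            total_difficulty: rlp.val_at(1)?,
            parent: rlp.val_at(2)?,
            children: rlp.list_at(3)?,
            is_finalized: if use_short_version { false } else { rlp.val_at(4)? },
        })
    }
}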
@ -19,11 +19,11 @@
|
|||||||
use std::collections::VecDeque;
|
use std::collections::VecDeque;
|
||||||
use ethereum_types::{U256, H256, Bloom};
|
use ethereum_types::{U256, H256, Bloom};
|
||||||
|
|
||||||
use bytes::Bytes;
|
|
||||||
use header::Header;
|
use header::Header;
|
||||||
use rlp::encode;
|
use rlp::encode;
|
||||||
use transaction::SignedTransaction;
|
use transaction::SignedTransaction;
|
||||||
use views::BlockView;
|
use views::BlockView;
|
||||||
|
use encoded;
|
||||||
|
|
||||||
/// Helper structure, used for encoding blocks.
|
/// Helper structure, used for encoding blocks.
|
||||||
#[derive(Default, Clone, RlpEncodable)]
|
#[derive(Default, Clone, RlpEncodable)]
|
||||||
@ -41,7 +41,7 @@ impl Block {
|
|||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn hash(&self) -> H256 {
|
pub fn hash(&self) -> H256 {
|
||||||
view!(BlockView, &self.encoded()).header_view().hash()
|
view!(BlockView, &self.encoded().raw()).header_view().hash()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
@ -50,8 +50,8 @@ impl Block {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn encoded(&self) -> Bytes {
|
pub fn encoded(&self) -> encoded::Block {
|
||||||
encode(self).into_vec()
|
encoded::Block::new(encode(self).into_vec())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
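The test generator's `Block` now hands out the typed `encoded::Block` wrapper instead of raw `Bytes`; callers that still need a slice borrow it with `raw()`. A short usage sketch (the `BlockBuilder` binding is assumed):

let genesis = BlockBuilder::genesis();
let block: encoded::Block = genesis.last().encoded(); // owned, typed wrapper
let hash = block.header_view().hash();                // header is read without decoding the body
let bytes: &[u8] = block.raw();                       // borrow the underlying RLP where a slice is needed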
|
@ -19,13 +19,14 @@ use ethereum_types::{H256, Bloom};
|
|||||||
use header::BlockNumber;
|
use header::BlockNumber;
|
||||||
use blockchain::block_info::BlockInfo;
|
use blockchain::block_info::BlockInfo;
|
||||||
use blockchain::extras::{BlockDetails, BlockReceipts, TransactionAddress};
|
use blockchain::extras::{BlockDetails, BlockReceipts, TransactionAddress};
|
||||||
|
use encoded::Block;
|
||||||
|
|
||||||
/// Block extras update info.
|
/// Block extras update info.
|
||||||
pub struct ExtrasUpdate<'a> {
|
pub struct ExtrasUpdate {
|
||||||
/// Block info.
|
/// Block info.
|
||||||
pub info: BlockInfo,
|
pub info: BlockInfo,
|
||||||
/// Current block uncompressed rlp bytes
|
/// Current block uncompressed rlp bytes
|
||||||
pub block: &'a [u8],
|
pub block: Block,
|
||||||
/// Modified block hashes.
|
/// Modified block hashes.
|
||||||
pub block_hashes: HashMap<BlockNumber, H256>,
|
pub block_hashes: HashMap<BlockNumber, H256>,
|
||||||
/// Modified block details.
|
/// Modified block details.
|
||||||
@ -44,6 +45,4 @@ pub struct ExtrasInsert {
|
|||||||
pub fork_choice: ::engines::ForkChoice,
|
pub fork_choice: ::engines::ForkChoice,
|
||||||
/// Is the inserted block considered finalized.
|
/// Is the inserted block considered finalized.
|
||||||
pub is_finalized: bool,
|
pub is_finalized: bool,
|
||||||
/// New block local metadata.
|
|
||||||
pub metadata: Option<Vec<u8>>,
|
|
||||||
}
|
}
|
||||||
|
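Owning the block drops the `'a` lifetime from `ExtrasUpdate`, and `ExtrasInsert` is reduced to the two fields the fork-choice path still needs. A sketch of what a caller builds now (values are placeholders):

// ExtrasUpdate owns its block: `pub block: encoded::Block` instead of `&'a [u8]`.
let extras = ExtrasInsert {
    fork_choice: ::engines::ForkChoice::New,
    is_finalized: false,
};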
@ -74,7 +74,6 @@ use types::filter::Filter;
|
|||||||
use types::ancestry_action::AncestryAction;
|
use types::ancestry_action::AncestryAction;
|
||||||
use verification;
|
use verification;
|
||||||
use verification::{PreverifiedBlock, Verifier, BlockQueue};
|
use verification::{PreverifiedBlock, Verifier, BlockQueue};
|
||||||
use views::BlockView;
|
|
||||||
|
|
||||||
// re-export
|
// re-export
|
||||||
pub use types::blockchain_info::BlockChainInfo;
|
pub use types::blockchain_info::BlockChainInfo;
|
||||||
@ -210,7 +209,7 @@ pub struct Client {
|
|||||||
/// Queued ancient blocks, make sure they are imported in order.
|
/// Queued ancient blocks, make sure they are imported in order.
|
||||||
queued_ancient_blocks: Arc<RwLock<(
|
queued_ancient_blocks: Arc<RwLock<(
|
||||||
HashSet<H256>,
|
HashSet<H256>,
|
||||||
VecDeque<(Header, Bytes, Bytes)>
|
VecDeque<(Header, encoded::Block, Bytes)>
|
||||||
)>>,
|
)>>,
|
||||||
ancient_blocks_import_lock: Arc<Mutex<()>>,
|
ancient_blocks_import_lock: Arc<Mutex<()>>,
|
||||||
/// Consensus messages import queue
|
/// Consensus messages import queue
|
||||||
@ -297,7 +296,7 @@ impl Importer {
|
|||||||
|
|
||||||
let transactions_len = closed_block.transactions().len();
|
let transactions_len = closed_block.transactions().len();
|
||||||
|
|
||||||
let route = self.commit_block(closed_block, &header, &bytes, client);
|
let route = self.commit_block(closed_block, &header, encoded::Block::new(bytes), client);
|
||||||
import_results.push(route);
|
import_results.push(route);
|
||||||
|
|
||||||
client.report.write().accrue_block(&header, transactions_len);
|
client.report.write().accrue_block(&header, transactions_len);
|
||||||
@ -430,9 +429,8 @@ impl Importer {
|
|||||||
///
|
///
|
||||||
/// The block is guaranteed to be the next best blocks in the
|
/// The block is guaranteed to be the next best blocks in the
|
||||||
/// first block sequence. Does no sealing or transaction validation.
|
/// first block sequence. Does no sealing or transaction validation.
|
||||||
fn import_old_block(&self, header: &Header, block_bytes: &[u8], receipts_bytes: &[u8], db: &KeyValueDB, chain: &BlockChain) -> Result<H256, ::error::Error> {
|
fn import_old_block(&self, header: &Header, block: encoded::Block, receipts_bytes: &[u8], db: &KeyValueDB, chain: &BlockChain) -> Result<(), ::error::Error> {
|
||||||
let receipts = ::rlp::decode_list(receipts_bytes);
|
let receipts = ::rlp::decode_list(receipts_bytes);
|
||||||
let hash = header.hash();
|
|
||||||
let _import_lock = self.import_lock.lock();
|
let _import_lock = self.import_lock.lock();
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -443,28 +441,29 @@ impl Importer {
|
|||||||
|
|
||||||
// Commit results
|
// Commit results
|
||||||
let mut batch = DBTransaction::new();
|
let mut batch = DBTransaction::new();
|
||||||
chain.insert_unordered_block(&mut batch, block_bytes, receipts, None, false, true);
|
chain.insert_unordered_block(&mut batch, block, receipts, None, false, true);
|
||||||
// Final commit to the DB
|
// Final commit to the DB
|
||||||
db.write_buffered(batch);
|
db.write_buffered(batch);
|
||||||
chain.commit();
|
chain.commit();
|
||||||
}
|
}
|
||||||
db.flush().expect("DB flush failed.");
|
db.flush().expect("DB flush failed.");
|
||||||
Ok(hash)
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: the header of the block passed here is not necessarily sealed, as
|
// NOTE: the header of the block passed here is not necessarily sealed, as
|
||||||
// it is for reconstructing the state transition.
|
// it is for reconstructing the state transition.
|
||||||
//
|
//
|
||||||
// The header passed is from the original block data and is sealed.
|
// The header passed is from the original block data and is sealed.
|
||||||
fn commit_block<B>(&self, block: B, header: &Header, block_data: &[u8], client: &Client) -> ImportRoute where B: Drain {
|
fn commit_block<B>(&self, block: B, header: &Header, block_data: encoded::Block, client: &Client) -> ImportRoute where B: Drain {
|
||||||
let hash = &header.hash();
|
let hash = &header.hash();
|
||||||
let number = header.number();
|
let number = header.number();
|
||||||
let parent = header.parent_hash();
|
let parent = header.parent_hash();
|
||||||
let chain = client.chain.read();
|
let chain = client.chain.read();
|
||||||
|
let is_finalized = false;
|
||||||
|
|
||||||
// Commit results
|
// Commit results
|
||||||
let block = block.drain();
|
let block = block.drain();
|
||||||
assert_eq!(header.hash(), view!(BlockView, block_data).header_view().hash());
|
debug_assert_eq!(header.hash(), block_data.header_view().hash());
|
||||||
|
|
||||||
let mut batch = DBTransaction::new();
|
let mut batch = DBTransaction::new();
|
||||||
|
|
||||||
@ -476,8 +475,7 @@ impl Importer {
|
|||||||
|
|
||||||
let new = ExtendedHeader {
|
let new = ExtendedHeader {
|
||||||
header: header.clone(),
|
header: header.clone(),
|
||||||
is_finalized: block.is_finalized,
|
is_finalized,
|
||||||
metadata: block.metadata,
|
|
||||||
parent_total_difficulty: chain.block_details(&parent).expect("Parent block is in the database; qed").total_difficulty
|
parent_total_difficulty: chain.block_details(&parent).expect("Parent block is in the database; qed").total_difficulty
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -493,8 +491,6 @@ impl Importer {
|
|||||||
ExtendedHeader {
|
ExtendedHeader {
|
||||||
parent_total_difficulty: details.total_difficulty - *header.difficulty(),
|
parent_total_difficulty: details.total_difficulty - *header.difficulty(),
|
||||||
is_finalized: details.is_finalized,
|
is_finalized: details.is_finalized,
|
||||||
metadata: details.metadata,
|
|
||||||
|
|
||||||
header: header,
|
header: header,
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -515,7 +511,7 @@ impl Importer {
|
|||||||
// state.
|
// state.
|
||||||
self.check_epoch_end_signal(
|
self.check_epoch_end_signal(
|
||||||
&header,
|
&header,
|
||||||
block_data,
|
block_data.raw(),
|
||||||
&receipts,
|
&receipts,
|
||||||
&state,
|
&state,
|
||||||
&chain,
|
&chain,
|
||||||
@ -532,8 +528,7 @@ impl Importer {
|
|||||||
|
|
||||||
let route = chain.insert_block(&mut batch, block_data, receipts.clone(), ExtrasInsert {
|
let route = chain.insert_block(&mut batch, block_data, receipts.clone(), ExtrasInsert {
|
||||||
fork_choice: fork_choice,
|
fork_choice: fork_choice,
|
||||||
is_finalized: block.is_finalized,
|
is_finalized,
|
||||||
metadata: new.metadata,
|
|
||||||
});
|
});
|
||||||
|
|
||||||
client.tracedb.read().import(&mut batch, TraceImportRequest {
|
client.tracedb.read().import(&mut batch, TraceImportRequest {
|
||||||
@ -1327,7 +1322,7 @@ impl ChainInfo for Client {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl BlockInfo for Client {
|
impl BlockInfo for Client {
|
||||||
fn block_header(&self, id: BlockId) -> Option<::encoded::Header> {
|
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||||
let chain = self.chain.read();
|
let chain = self.chain.read();
|
||||||
|
|
||||||
Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash))
|
Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash))
|
||||||
@ -2053,7 +2048,7 @@ impl IoClient for Client {
|
|||||||
{
|
{
|
||||||
let mut queued = self.queued_ancient_blocks.write();
|
let mut queued = self.queued_ancient_blocks.write();
|
||||||
queued.0.insert(hash);
|
queued.0.insert(hash);
|
||||||
queued.1.push_back((header, block_bytes, receipts_bytes));
|
queued.1.push_back((header, encoded::Block::new(block_bytes), receipts_bytes));
|
||||||
}
|
}
|
||||||
|
|
||||||
let queued = self.queued_ancient_blocks.clone();
|
let queued = self.queued_ancient_blocks.clone();
|
||||||
@ -2069,7 +2064,7 @@ impl IoClient for Client {
|
|||||||
let hash = header.hash();
|
let hash = header.hash();
|
||||||
let result = client.importer.import_old_block(
|
let result = client.importer.import_old_block(
|
||||||
&header,
|
&header,
|
||||||
&block_bytes,
|
block_bytes,
|
||||||
&receipts_bytes,
|
&receipts_bytes,
|
||||||
&**client.db.read().key_value(),
|
&**client.db.read().key_value(),
|
||||||
&*client.chain.read(),
|
&*client.chain.read(),
|
||||||
@ -2194,7 +2189,7 @@ impl ImportSealedBlock for Client {
|
|||||||
let block_data = block.rlp_bytes();
|
let block_data = block.rlp_bytes();
|
||||||
let header = block.header().clone();
|
let header = block.header().clone();
|
||||||
|
|
||||||
let route = self.importer.commit_block(block, &header, &block_data, self);
|
let route = self.importer.commit_block(block, &header, encoded::Block::new(block_data), self);
|
||||||
trace!(target: "client", "Imported sealed block #{} ({})", number, h);
|
trace!(target: "client", "Imported sealed block #{} ({})", number, h);
|
||||||
self.state_db.write().sync_cache(&route.enacted, &route.retracted, false);
|
self.state_db.write().sync_cache(&route.enacted, &route.retracted, false);
|
||||||
route
|
route
|
||||||
@ -2380,6 +2375,7 @@ mod tests {
|
|||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use kvdb::DBTransaction;
|
use kvdb::DBTransaction;
|
||||||
use blockchain::ExtrasInsert;
|
use blockchain::ExtrasInsert;
|
||||||
|
use encoded;
|
||||||
|
|
||||||
let client = generate_dummy_client(0);
|
let client = generate_dummy_client(0);
|
||||||
let genesis = client.chain_info().best_block_hash;
|
let genesis = client.chain_info().best_block_hash;
|
||||||
@ -2392,10 +2388,9 @@ mod tests {
|
|||||||
let another_client = client.clone();
|
let another_client = client.clone();
|
||||||
thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
let mut batch = DBTransaction::new();
|
let mut batch = DBTransaction::new();
|
||||||
another_client.chain.read().insert_block(&mut batch, &new_block, Vec::new(), ExtrasInsert {
|
another_client.chain.read().insert_block(&mut batch, encoded::Block::new(new_block), Vec::new(), ExtrasInsert {
|
||||||
fork_choice: ::engines::ForkChoice::New,
|
fork_choice: ::engines::ForkChoice::New,
|
||||||
is_finalized: false,
|
is_finalized: false,
|
||||||
metadata: None,
|
|
||||||
});
|
});
|
||||||
go_thread.store(true, Ordering::SeqCst);
|
go_thread.store(true, Ordering::SeqCst);
|
||||||
});
|
});
|
||||||
|
@ -222,6 +222,11 @@ impl Block {
|
|||||||
|
|
||||||
/// Consume the view and return the raw bytes.
|
/// Consume the view and return the raw bytes.
|
||||||
pub fn into_inner(self) -> Vec<u8> { self.0 }
|
pub fn into_inner(self) -> Vec<u8> { self.0 }
|
||||||
|
|
||||||
|
/// Returns the reference to slice of bytes
|
||||||
|
pub fn raw(&self) -> &[u8] {
|
||||||
|
&self.0
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// forwarders to borrowed header view.
|
// forwarders to borrowed header view.
|
||||||
|
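`encoded::Block` grows a borrowing `raw()` accessor alongside the consuming `into_inner()`, so callers can feed `&[u8]` APIs (such as `check_epoch_end_signal` above) without unwrapping. A small usage sketch:

let block = encoded::Block::new(block_rlp);

let slice: &[u8] = block.raw();          // borrow the underlying RLP, wrapper stays usable
let bytes: Vec<u8> = block.into_inner(); // consume the wrapper and take the Vec<u8> back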
@ -43,8 +43,6 @@ pub struct ExtendedHeader {
|
|||||||
pub is_finalized: bool,
|
pub is_finalized: bool,
|
||||||
/// The parent block difficulty.
|
/// The parent block difficulty.
|
||||||
pub parent_total_difficulty: U256,
|
pub parent_total_difficulty: U256,
|
||||||
/// The block metadata information.
|
|
||||||
pub metadata: Option<Vec<u8>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A block header.
|
/// A block header.
|
||||||
@ -418,10 +416,6 @@ impl ::parity_machine::FinalizableHeader for ExtendedHeader {
|
|||||||
fn is_finalized(&self) -> bool { self.is_finalized }
|
fn is_finalized(&self) -> bool { self.is_finalized }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ::parity_machine::WithMetadataHeader for ExtendedHeader {
|
|
||||||
fn metadata(&self) -> Option<&[u8]> { self.metadata.as_ref().map(|v| v.as_ref()) }
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use rustc_hex::FromHex;
|
use rustc_hex::FromHex;
|
||||||
|
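After dropping `metadata` and the `WithMetadataHeader` impl, `ExtendedHeader` is down to the three fields fork choice consults. Its remaining shape, as far as this diff shows it:

pub struct ExtendedHeader {
    /// The wrapped header.
    pub header: Header,
    /// Whether the block this header belongs to is considered finalized.
    pub is_finalized: bool,
    /// The parent block difficulty.
    pub parent_total_difficulty: U256,
}

impl ::parity_machine::FinalizableHeader for ExtendedHeader {
    fn is_finalized(&self) -> bool { self.is_finalized }
}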
@ -37,6 +37,7 @@ use rlp::{RlpStream, Rlp};
|
|||||||
use ethereum_types::{H256, U256};
|
use ethereum_types::{H256, U256};
|
||||||
use kvdb::KeyValueDB;
|
use kvdb::KeyValueDB;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
|
use encoded;
|
||||||
|
|
||||||
|
|
||||||
/// Snapshot creation and restoration for PoA chains.
|
/// Snapshot creation and restoration for PoA chains.
|
||||||
@ -339,7 +340,7 @@ impl Rebuilder for ChunkRebuilder {
|
|||||||
let parent_td: U256 = last_rlp.val_at(4)?;
|
let parent_td: U256 = last_rlp.val_at(4)?;
|
||||||
|
|
||||||
let mut batch = self.db.transaction();
|
let mut batch = self.db.transaction();
|
||||||
self.chain.insert_unordered_block(&mut batch, &block_data, receipts, Some(parent_td), true, false);
|
self.chain.insert_unordered_block(&mut batch, encoded::Block::new(block_data), receipts, Some(parent_td), true, false);
|
||||||
self.db.write_buffered(batch);
|
self.db.write_buffered(batch);
|
||||||
|
|
||||||
self.warp_target = Some(block.header);
|
self.warp_target = Some(block.header);
|
||||||
|
@ -35,6 +35,7 @@ use kvdb::KeyValueDB;
|
|||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use rlp::{RlpStream, Rlp};
|
use rlp::{RlpStream, Rlp};
|
||||||
use rand::OsRng;
|
use rand::OsRng;
|
||||||
|
use encoded;
|
||||||
|
|
||||||
/// Snapshot creation and restoration for PoW chains.
|
/// Snapshot creation and restoration for PoW chains.
|
||||||
/// This includes blocks from the head of the chain as a
|
/// This includes blocks from the head of the chain as a
|
||||||
@ -220,7 +221,6 @@ impl Rebuilder for PowRebuilder {
|
|||||||
/// Feed the rebuilder an uncompressed block chunk.
|
/// Feed the rebuilder an uncompressed block chunk.
|
||||||
/// Returns the number of blocks fed or any errors.
|
/// Returns the number of blocks fed or any errors.
|
||||||
fn feed(&mut self, chunk: &[u8], engine: &EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
|
fn feed(&mut self, chunk: &[u8], engine: &EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
|
||||||
use views::BlockView;
|
|
||||||
use snapshot::verify_old_block;
|
use snapshot::verify_old_block;
|
||||||
use ethereum_types::U256;
|
use ethereum_types::U256;
|
||||||
use triehash::ordered_trie_root;
|
use triehash::ordered_trie_root;
|
||||||
@ -250,7 +250,7 @@ impl Rebuilder for PowRebuilder {
|
|||||||
let receipts_root = ordered_trie_root(pair.at(1)?.iter().map(|r| r.as_raw()));
|
let receipts_root = ordered_trie_root(pair.at(1)?.iter().map(|r| r.as_raw()));
|
||||||
|
|
||||||
let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
|
let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
|
||||||
let block_bytes = block.rlp_bytes();
|
let block_bytes = encoded::Block::new(block.rlp_bytes());
|
||||||
let is_best = cur_number == self.best_number;
|
let is_best = cur_number == self.best_number;
|
||||||
|
|
||||||
if is_best {
|
if is_best {
|
||||||
@ -275,16 +275,16 @@ impl Rebuilder for PowRebuilder {
|
|||||||
|
|
||||||
// special-case the first block in each chunk.
|
// special-case the first block in each chunk.
|
||||||
if idx == 3 {
|
if idx == 3 {
|
||||||
if self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, Some(parent_total_difficulty), is_best, false) {
|
if self.chain.insert_unordered_block(&mut batch, block_bytes, receipts, Some(parent_total_difficulty), is_best, false) {
|
||||||
self.disconnected.push((cur_number, block.header.hash()));
|
self.disconnected.push((cur_number, block.header.hash()));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, is_best, false);
|
self.chain.insert_unordered_block(&mut batch, block_bytes, receipts, None, is_best, false);
|
||||||
}
|
}
|
||||||
self.db.write_buffered(batch);
|
self.db.write_buffered(batch);
|
||||||
self.chain.commit();
|
self.chain.commit();
|
||||||
|
|
||||||
parent_hash = view!(BlockView, &block_bytes).hash();
|
parent_hash = block.header.hash();
|
||||||
cur_number += 1;
|
cur_number += 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
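The rebuilder now encodes each restored block exactly once: the `encoded::Block` moves into `insert_unordered_block`, and the parent hash for the next iteration is taken from the header that is already decoded instead of re-parsing the bytes with `view!`. A sketch of the loop body under those assumptions (the `idx == 3` special case is elided):

let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
let block_bytes = encoded::Block::new(block.rlp_bytes());
let is_best = cur_number == self.best_number;

// ownership of the encoded block moves into the chain; no second deserialization
self.chain.insert_unordered_block(&mut batch, block_bytes, receipts, None, is_best, false);
self.db.write_buffered(batch);
self.chain.commit();

parent_hash = block.header.hash(); // previously: view!(BlockView, &block_bytes).hash()
cur_number += 1;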
@ -43,15 +43,14 @@ fn chunk_and_restore(amount: u64) {
|
|||||||
let snapshot_path = tempdir.path().join("SNAP");
|
let snapshot_path = tempdir.path().join("SNAP");
|
||||||
|
|
||||||
let old_db = test_helpers::new_db();
|
let old_db = test_helpers::new_db();
|
||||||
let bc = BlockChain::new(Default::default(), &genesis.encoded(), old_db.clone());
|
let bc = BlockChain::new(Default::default(), genesis.encoded().raw(), old_db.clone());
|
||||||
|
|
||||||
// build the blockchain.
|
// build the blockchain.
|
||||||
let mut batch = DBTransaction::new();
|
let mut batch = DBTransaction::new();
|
||||||
for block in generator {
|
for block in generator {
|
||||||
bc.insert_block(&mut batch, &block.encoded(), vec![], ExtrasInsert {
|
bc.insert_block(&mut batch, block.encoded(), vec![], ExtrasInsert {
|
||||||
fork_choice: ::engines::ForkChoice::New,
|
fork_choice: ::engines::ForkChoice::New,
|
||||||
is_finalized: false,
|
is_finalized: false,
|
||||||
metadata: None,
|
|
||||||
});
|
});
|
||||||
bc.commit();
|
bc.commit();
|
||||||
}
|
}
|
||||||
@ -83,7 +82,7 @@ fn chunk_and_restore(amount: u64) {
|
|||||||
|
|
||||||
// restore it.
|
// restore it.
|
||||||
let new_db = test_helpers::new_db();
|
let new_db = test_helpers::new_db();
|
||||||
let new_chain = BlockChain::new(Default::default(), &genesis.encoded(), new_db.clone());
|
let new_chain = BlockChain::new(Default::default(), genesis.encoded().raw(), new_db.clone());
|
||||||
let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
|
let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
|
||||||
|
|
||||||
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
|
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
|
||||||
@ -98,7 +97,7 @@ fn chunk_and_restore(amount: u64) {
|
|||||||
drop(rebuilder);
|
drop(rebuilder);
|
||||||
|
|
||||||
// and test it.
|
// and test it.
|
||||||
let new_chain = BlockChain::new(Default::default(), &genesis.encoded(), new_db);
|
let new_chain = BlockChain::new(Default::default(), genesis.encoded().raw(), new_db);
|
||||||
assert_eq!(new_chain.best_block_hash(), best_hash);
|
assert_eq!(new_chain.best_block_hash(), best_hash);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -130,7 +129,7 @@ fn checks_flag() {
|
|||||||
|
|
||||||
let db = test_helpers::new_db();
|
let db = test_helpers::new_db();
|
||||||
let engine = ::spec::Spec::new_test().engine;
|
let engine = ::spec::Spec::new_test().engine;
|
||||||
let chain = BlockChain::new(Default::default(), &genesis.last().encoded(), db.clone());
|
let chain = BlockChain::new(Default::default(), genesis.last().encoded().raw(), db.clone());
|
||||||
|
|
||||||
let manifest = ::snapshot::ManifestData {
|
let manifest = ::snapshot::ManifestData {
|
||||||
version: 2,
|
version: 2,
|
||||||
|
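`BlockChain::new` still expects the genesis block as a byte slice, so the snapshot tests borrow it from the typed wrapper with `raw()` rather than passing `&Bytes`. Sketch:

let db = test_helpers::new_db();
// encoded() yields an owned encoded::Block; raw() borrows its RLP for the &[u8] parameter
let bc = BlockChain::new(Default::default(), genesis.encoded().raw(), db.clone());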
@ -43,6 +43,7 @@ use blooms_db;
|
|||||||
use kvdb::KeyValueDB;
|
use kvdb::KeyValueDB;
|
||||||
use kvdb_rocksdb;
|
use kvdb_rocksdb;
|
||||||
use tempdir::TempDir;
|
use tempdir::TempDir;
|
||||||
|
use encoded;
|
||||||
|
|
||||||
/// Creates test block with corresponding header
|
/// Creates test block with corresponding header
|
||||||
pub fn create_test_block(header: &Header) -> Bytes {
|
pub fn create_test_block(header: &Header) -> Bytes {
|
||||||
@ -354,10 +355,9 @@ pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
|
|||||||
let mut batch = db.key_value().transaction();
|
let mut batch = db.key_value().transaction();
|
||||||
for block_order in 1..block_number {
|
for block_order in 1..block_number {
|
||||||
// Total difficulty is always 0 here.
|
// Total difficulty is always 0 here.
|
||||||
bc.insert_block(&mut batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![], ExtrasInsert {
|
bc.insert_block(&mut batch, encoded::Block::new(create_unverifiable_block(block_order, bc.best_block_hash())), vec![], ExtrasInsert {
|
||||||
fork_choice: ::engines::ForkChoice::New,
|
fork_choice: ::engines::ForkChoice::New,
|
||||||
is_finalized: false,
|
is_finalized: false,
|
||||||
metadata: None,
|
|
||||||
});
|
});
|
||||||
bc.commit();
|
bc.commit();
|
||||||
}
|
}
|
||||||
@ -373,10 +373,9 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain {
|
|||||||
let mut batch = db.key_value().transaction();
|
let mut batch = db.key_value().transaction();
|
||||||
for block_order in 1..block_number {
|
for block_order in 1..block_number {
|
||||||
// Total difficulty is always 0 here.
|
// Total difficulty is always 0 here.
|
||||||
bc.insert_block(&mut batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![], ExtrasInsert {
|
bc.insert_block(&mut batch, encoded::Block::new(create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None)), vec![], ExtrasInsert {
|
||||||
fork_choice: ::engines::ForkChoice::New,
|
fork_choice: ::engines::ForkChoice::New,
|
||||||
is_finalized: false,
|
is_finalized: false,
|
||||||
metadata: None,
|
|
||||||
});
|
});
|
||||||
bc.commit();
|
bc.commit();
|
||||||
}
|
}
|
||||||
|
@ -467,7 +467,6 @@ mod tests {
|
|||||||
parent: header.parent_hash().clone(),
|
parent: header.parent_hash().clone(),
|
||||||
children: Vec::new(),
|
children: Vec::new(),
|
||||||
is_finalized: false,
|
is_finalized: false,
|
||||||
metadata: None,
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -95,22 +95,6 @@ pub trait Transactions: LiveBlock {
|
|||||||
fn transactions(&self) -> &[Self::Transaction];
|
fn transactions(&self) -> &[Self::Transaction];
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Trait for blocks which have finalized information.
|
|
||||||
pub trait Finalizable: LiveBlock {
|
|
||||||
/// Get whether the block is finalized.
|
|
||||||
fn is_finalized(&self) -> bool;
|
|
||||||
/// Mark the block as finalized.
|
|
||||||
fn mark_finalized(&mut self);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A state machine with block metadata.
|
|
||||||
pub trait WithMetadata: LiveBlock {
|
|
||||||
/// Get the current live block metadata.
|
|
||||||
fn metadata(&self) -> Option<&[u8]>;
|
|
||||||
/// Set the current live block metadata.
|
|
||||||
fn set_metadata(&mut self, value: Option<Vec<u8>>);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Generalization of types surrounding blockchain-suitable state machines.
|
/// Generalization of types surrounding blockchain-suitable state machines.
|
||||||
pub trait Machine: for<'a> LocalizedMachine<'a> {
|
pub trait Machine: for<'a> LocalizedMachine<'a> {
|
||||||
/// The block header type.
|
/// The block header type.
|
||||||
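With `Finalizable` and `WithMetadata` removed from `parity_machine`, nothing on the live block answers finalization questions any more; that state lives only in the blockchain extras written through `ExtrasInsert`. A minimal sketch of the lookup that replaces it (the `chain` and `block_hash` bindings are assumed):

// Finalization is read back from BlockDetails in the extras database:
let is_finalized = chain
    .block_details(&block_hash)
    .map(|details| details.is_finalized)
    .unwrap_or(false);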