ImportResult becomes a result

arkpar 2016-01-10 23:37:09 +01:00
parent 5f5f26de48
commit 452294ab8d
7 changed files with 96 additions and 85 deletions

View File

@@ -200,11 +200,16 @@ impl BlockChain {
 	/// ```json
 	/// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 }
 	/// ```
-	pub fn tree_route(&self, from: H256, to: H256) -> TreeRoute {
-		let from_details = self.block_details(&from).expect("from hash is invalid!");
-		let to_details = self.block_details(&to).expect("to hash is invalid!");
-
-		self._tree_route((from_details, from), (to_details, to))
+	pub fn tree_route(&self, from: H256, to: H256) -> Option<TreeRoute> {
+		let from_details = match self.block_details(&from) {
+			Some(h) => h,
+			None => return None,
+		};
+		let to_details = match self.block_details(&to) {
+			Some(h) => h,
+			None => return None,
+		};
+		Some(self._tree_route((from_details, from), (to_details, to)))
 	}
 
 	/// Similar to `tree_route` function, but can be used to return a route
@@ -597,52 +602,52 @@ mod tests {
 	assert_eq!(bc.block_hash(&U256::from(3)).unwrap(), b3a_hash);
 
 	// test trie route
-	let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone());
+	let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone()).unwrap();
 	assert_eq!(r0_1.ancestor, genesis_hash);
 	assert_eq!(r0_1.blocks, [b1_hash.clone()]);
 	assert_eq!(r0_1.index, 0);
 
-	let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone());
+	let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone()).unwrap();
 	assert_eq!(r0_2.ancestor, genesis_hash);
 	assert_eq!(r0_2.blocks, [b1_hash.clone(), b2_hash.clone()]);
 	assert_eq!(r0_2.index, 0);
 
-	let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone());
+	let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone()).unwrap();
 	assert_eq!(r1_3a.ancestor, b1_hash);
 	assert_eq!(r1_3a.blocks, [b2_hash.clone(), b3a_hash.clone()]);
 	assert_eq!(r1_3a.index, 0);
 
-	let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone());
+	let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone()).unwrap();
 	assert_eq!(r1_3b.ancestor, b1_hash);
 	assert_eq!(r1_3b.blocks, [b2_hash.clone(), b3b_hash.clone()]);
 	assert_eq!(r1_3b.index, 0);
 
-	let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone());
+	let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone()).unwrap();
 	assert_eq!(r3a_3b.ancestor, b2_hash);
 	assert_eq!(r3a_3b.blocks, [b3a_hash.clone(), b3b_hash.clone()]);
 	assert_eq!(r3a_3b.index, 1);
 
-	let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone());
+	let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone()).unwrap();
 	assert_eq!(r1_0.ancestor, genesis_hash);
 	assert_eq!(r1_0.blocks, [b1_hash.clone()]);
 	assert_eq!(r1_0.index, 1);
 
-	let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone());
+	let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone()).unwrap();
 	assert_eq!(r2_0.ancestor, genesis_hash);
 	assert_eq!(r2_0.blocks, [b2_hash.clone(), b1_hash.clone()]);
 	assert_eq!(r2_0.index, 2);
 
-	let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone());
+	let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone()).unwrap();
 	assert_eq!(r3a_1.ancestor, b1_hash);
 	assert_eq!(r3a_1.blocks, [b3a_hash.clone(), b2_hash.clone()]);
 	assert_eq!(r3a_1.index, 2);
 
-	let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone());
+	let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone()).unwrap();
 	assert_eq!(r3b_1.ancestor, b1_hash);
 	assert_eq!(r3b_1.blocks, [b3b_hash.clone(), b2_hash.clone()]);
 	assert_eq!(r3b_1.index, 2);
 
-	let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone());
+	let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone()).unwrap();
 	assert_eq!(r3b_3a.ancestor, b2_hash);
 	assert_eq!(r3b_3a.blocks, [b3b_hash.clone(), b3a_hash.clone()]);
 	assert_eq!(r3b_3a.index, 1);
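With `tree_route` now returning `Option<TreeRoute>`, an unknown `from` or `to` hash surfaces as `None` instead of panicking inside the call. A minimal sketch of a non-test caller (hypothetical helper, not part of this commit; assumes the crate's usual `BlockChain`, `H256` and `TreeRoute` items are in scope):

```rust
// Hypothetical caller: propagate the "unknown hash" case instead of unwrapping.
fn common_ancestor(bc: &BlockChain, from: H256, to: H256) -> Option<H256> {
    match bc.tree_route(from, to) {
        Some(route) => Some(route.ancestor), // both hashes known; return the fork point
        None => None,                        // at least one hash is not in the chain
    }
}
```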

View File

@@ -2,21 +2,14 @@ use std::sync::Arc;
 use util::*;
 use blockchain::BlockChain;
 use views::BlockView;
+use error::ImportError;
 
-/// Status for a block in a queue.
-pub enum QueueStatus {
-	/// Part of the known chain.
-	Known,
-	/// Part of the unknown chain.
-	Unknown,
-}
-
 /// General block status
 pub enum BlockStatus {
 	/// Part of the blockchain.
 	InChain,
 	/// Queued for import.
-	Queued(QueueStatus),
+	Queued,
 	/// Known as bad.
 	Bad,
 	/// Unknown.
@@ -24,16 +17,7 @@ pub enum BlockStatus {
 }
 
 /// Result of import block operation.
-pub enum ImportResult {
-	/// Added to import queue.
-	Queued(QueueStatus),
-	/// Already in the chain.
-	AlreadyInChain,
-	/// Already queued for import.
-	AlreadyQueued(QueueStatus),
-	/// Bad or already known as bad.
-	Bad,
-}
+pub type ImportResult = Result<(), ImportError>;
 
 /// Information about the blockchain gthered together.
 pub struct BlockChainInfo {
@@ -88,7 +72,7 @@ pub trait BlockChainClient : Sync {
 
 	/// Get a tree route between `from` and `to`.
 	/// See `BlockChain::tree_route`.
-	fn tree_route(&self, from: &H256, to: &H256) -> TreeRoute;
+	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
 
 	/// Get latest state node
 	fn state_data(&self, hash: &H256) -> Option<Bytes>;
@@ -165,7 +149,7 @@ impl BlockChainClient for Client {
 		}
 	}
 
-	fn tree_route(&self, from: &H256, to: &H256) -> TreeRoute {
+	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
 		self.chain.tree_route(from.clone(), to.clone())
 	}
@@ -184,11 +168,11 @@ impl BlockChainClient for Client {
 			let header = block.header_view();
 			let hash = header.sha3();
 			if self.chain.is_known(&hash) {
-				return ImportResult::Bad;
+				return Err(ImportError::AlreadyInChain);
 			}
 		}
 		self.chain.insert_block(bytes);
-		ImportResult::Queued(QueueStatus::Known)
+		Ok(())
 	}
 
 	fn queue_status(&self) -> BlockQueueStatus {
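Since `ImportResult` is now `Result<(), ImportError>`, callers of `import_block` can use ordinary `Result` handling instead of walking a bespoke enum. A sketch of what a call site might look like (hypothetical function, not part of this commit; assumes a mutable client handle as in the sync code):

```rust
// Hypothetical call site for the new Result-based ImportResult.
fn import_and_report(client: &mut Client, bytes: &[u8]) {
    match client.import_block(bytes) {
        Ok(()) => trace!(target: "client", "block accepted for import"),
        Err(ImportError::AlreadyInChain) => trace!(target: "client", "block already in chain"),
        Err(ImportError::AlreadyQueued) => trace!(target: "client", "block already queued"),
        Err(ImportError::Bad(e)) => debug!(target: "client", "block rejected: {:?}", e),
    }
}
```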

View File

@@ -23,6 +23,13 @@ pub enum BlockError {
 	InvalidSealArity(Mismatch<usize>),
 }
 
+#[derive(Debug)]
+pub enum ImportError {
+	Bad(BlockError),
+	AlreadyInChain,
+	AlreadyQueued,
+}
+
 #[derive(Debug)]
 /// General error type which should be capable of representing all errors in ethcore.
 pub enum Error {
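`ImportError` derives `Debug` but is not yet wired into the crate-wide `Error` enum below it. One natural follow-up, sketched here purely as an assumption and not part of this diff, would be an `Import` variant plus a `From` impl so import failures can be propagated with `try!`:

```rust
// Hypothetical glue, not in this commit: assumes Error gains an
// Import(ImportError) variant.
impl From<ImportError> for Error {
    fn from(err: ImportError) -> Error {
        Error::Import(err)
    }
}
```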

View File

@@ -1,26 +1,24 @@
 use std::sync::Arc;
-//use util::bytes::*;
-use util::sha3::*;
+use util::*;
 use blockchain::BlockChain;
 use client::{QueueStatus, ImportResult};
 use views::{BlockView};
 
-pub struct BlockQueue {
-	chain: Arc<BlockChain>
-}
+/// A queue of blocks. Sits between network or other I/O and the BlockChain.
+/// Sorts them ready for blockchain insertion.
+pub struct BlockQueue;
 
 impl BlockQueue {
-	pub fn new(chain: Arc<BlockChain>) -> BlockQueue {
-		BlockQueue {
-			chain: chain
-		}
+	/// Creates a new queue instance.
+	pub fn new() -> BlockQueue {
+		BlockQueue
 	}
 
+	/// Clear the queue and stop verification activity.
 	pub fn clear(&mut self) {
 	}
 
-	pub fn import_block(&mut self, bytes: &[u8]) -> ImportResult {
+	/// Add a block to the queue.
+	pub fn import_block(&mut self, bytes: &[u8], bc: &mut BlockChain) -> ImportResult {
 		//TODO: verify block
 		{
 			let block = BlockView::new(bytes);
@@ -30,7 +28,7 @@ impl BlockQueue {
 				return ImportResult::Bad;
 			}
 		}
-		self.chain.insert_block(bytes);
+		bc.insert_block(bytes);
 		ImportResult::Queued(QueueStatus::Known)
 	}
 }

View File

@@ -17,8 +17,9 @@ use util::*;
 use std::mem::{replace};
 use views::{HeaderView};
 use header::{Header as BlockHeader};
-use client::{BlockNumber, BlockChainClient, BlockStatus, QueueStatus, ImportResult};
+use client::{BlockNumber, BlockChainClient, BlockStatus};
 use sync::range_collection::{RangeCollection, ToUsize, FromUsize};
+use error::*;
 use sync::io::SyncIo;
 
 impl ToUsize for BlockNumber {
@@ -76,12 +77,13 @@ struct HeaderId {
 }
 
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
+/// Sync state
 pub enum SyncState {
 	/// Initial chain sync has not started yet
 	NotSynced,
 	/// Initial chain sync complete. Waiting for new packets
 	Idle,
-	/// Block downloading paused. Waiting for block queue to process blocks and free space
+	/// Block downloading paused. Waiting for block queue to process blocks and free some space
 	Waiting,
 	/// Downloading blocks
 	Blocks,
@@ -108,24 +110,33 @@ pub struct SyncStatus {
 }
 
 #[derive(PartialEq, Eq, Debug)]
+/// Peer data type requested
 enum PeerAsking {
 	Nothing,
 	BlockHeaders,
 	BlockBodies,
 }
 
+/// Syncing peer information
 struct PeerInfo {
+	/// eth protocol version
 	protocol_version: u32,
+	/// Peer chain genesis hash
 	genesis: H256,
+	/// Peer network id
 	network_id: U256,
+	/// Peer best block hash
 	latest: H256,
+	/// Peer total difficulty
 	difficulty: U256,
+	/// Type of data currenty being requested from peer.
 	asking: PeerAsking,
+	/// A set of block numbers being requested
 	asking_blocks: Vec<BlockNumber>,
 }
 
-type Body = Bytes;
-
+/// Blockchain sync handler.
+/// See module documentation for more details.
 pub struct ChainSync {
 	/// Sync state
 	state: SyncState,
@@ -140,7 +151,7 @@ pub struct ChainSync {
 	/// Downloaded headers.
 	headers: Vec<(BlockNumber, Vec<Header>)>, //TODO: use BTreeMap once range API is sable. For now it is a vector sorted in descending order
 	/// Downloaded bodies
-	bodies: Vec<(BlockNumber, Vec<Body>)>, //TODO: use BTreeMap once range API is sable. For now it is a vector sorted in descending order
+	bodies: Vec<(BlockNumber, Vec<Bytes>)>, //TODO: use BTreeMap once range API is sable. For now it is a vector sorted in descending order
 	/// Peer info
 	peers: HashMap<PeerId, PeerInfo>,
 	/// Used to map body to header
@@ -390,31 +401,31 @@ impl ChainSync {
 		// TODO: Decompose block and add to self.headers and self.bodies instead
 		if header_view.number() == From::from(self.last_imported_block + 1) {
 			match io.chain().import_block(block_rlp.as_raw()) {
-				ImportResult::AlreadyInChain => {
+				Err(ImportError::AlreadyInChain) => {
 					trace!(target: "sync", "New block already in chain {:?}", h);
 				},
-				ImportResult::AlreadyQueued(_) => {
+				Err(ImportError::AlreadyQueued) => {
 					trace!(target: "sync", "New block already queued {:?}", h);
 				},
-				ImportResult::Queued(QueueStatus::Known) => {
+				Ok(()) => {
 					trace!(target: "sync", "New block queued {:?}", h);
 				},
-				ImportResult::Queued(QueueStatus::Unknown) => {
-					trace!(target: "sync", "New block unknown {:?}", h);
-					//TODO: handle too many unknown blocks
-					let difficulty: U256 = try!(r.val_at(1));
-					let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty;
-					if difficulty > peer_difficulty {
-						trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
-						self.sync_peer(io, peer_id, true);
-					}
-				},
-				ImportResult::Bad =>{
-					debug!(target: "sync", "Bad new block {:?}", h);
+				Err(e) => {
+					debug!(target: "sync", "Bad new block {:?} : {:?}", h, e);
 					io.disable_peer(peer_id);
 				}
 			};
 		}
+		else {
+			trace!(target: "sync", "New block unknown {:?}", h);
+			//TODO: handle too many unknown blocks
+			let difficulty: U256 = try!(r.val_at(1));
+			let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty;
+			if difficulty > peer_difficulty {
+				trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
+				self.sync_peer(io, peer_id, true);
+			}
+		}
 		Ok(())
 	}
@@ -434,7 +445,7 @@ impl ChainSync {
 			BlockStatus::InChain => {
 				trace!(target: "sync", "New block hash already in chain {:?}", h);
 			},
-			BlockStatus::Queued(_) => {
+			BlockStatus::Queued => {
 				trace!(target: "sync", "New hash block already queued {:?}", h);
 			},
 			BlockStatus::Unknown => {
@@ -642,27 +653,24 @@ impl ChainSync {
 			block_rlp.append_raw(body.at(1).as_raw(), 1);
 			let h = &headers.1[i].hash;
 			match io.chain().import_block(&block_rlp.out()) {
-				ImportResult::AlreadyInChain => {
+				Err(ImportError::AlreadyInChain) => {
 					trace!(target: "sync", "Block already in chain {:?}", h);
 					self.last_imported_block = headers.0 + i as BlockNumber;
 					self.last_imported_hash = h.clone();
 				},
-				ImportResult::AlreadyQueued(_) => {
+				Err(ImportError::AlreadyQueued) => {
 					trace!(target: "sync", "Block already queued {:?}", h);
 					self.last_imported_block = headers.0 + i as BlockNumber;
 					self.last_imported_hash = h.clone();
 				},
-				ImportResult::Queued(QueueStatus::Known) => {
+				Ok(()) => {
 					trace!(target: "sync", "Block queued {:?}", h);
 					self.last_imported_block = headers.0 + i as BlockNumber;
 					self.last_imported_hash = h.clone();
 					imported += 1;
 				},
-				ImportResult::Queued(QueueStatus::Unknown) => {
-					panic!("Queued out of order block");
-				},
-				ImportResult::Bad =>{
-					debug!(target: "sync", "Bad block {:?}", h);
+				Err(e) => {
+					debug!(target: "sync", "Bad block {:?} : {:?}", h, e);
 					restart = true;
 				}
 			}
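Both match sites in the sync code now share the same dispatch shape: the two benign duplicate errors are recognized explicitly, `Ok(())` advances the import cursor, and any other `Err` is treated as a bad block. Condensed into a standalone sketch (hypothetical helper, not part of this diff):

```rust
// Hypothetical summary of the dispatch used in the sync handlers above.
fn classify(result: ImportResult) -> &'static str {
    match result {
        Ok(()) => "queued",
        Err(ImportError::AlreadyInChain) => "already in chain",
        Err(ImportError::AlreadyQueued) => "already queued",
        Err(_) => "bad block",
    }
}
```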

View File

@@ -2,19 +2,28 @@ use client::BlockChainClient;
 use util::network::{HandlerIo, PeerId, PacketId,};
 use util::error::UtilError;
 
+/// IO interface for the syning handler.
+/// Provides peer connection management and an interface to the blockchain client.
+// TODO: ratings
 pub trait SyncIo {
+	/// Disable a peer
 	fn disable_peer(&mut self, peer_id: &PeerId);
+	/// Respond to current request with a packet. Can be called from an IO handler for incoming packet.
 	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
+	/// Send a packet to a peer.
 	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
+	/// Get the blockchain
 	fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient;
 }
 
+/// Wraps `HandlerIo` and the blockchain client
 pub struct NetSyncIo<'s, 'h> where 'h:'s {
 	network: &'s mut HandlerIo<'h>,
 	chain: &'s mut BlockChainClient
 }
 
 impl<'s, 'h> NetSyncIo<'s, 'h> {
+	/// Creates a new instance from the `HandlerIo` and the blockchain client reference.
 	pub fn new(network: &'s mut HandlerIo<'h>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s,'h> {
 		NetSyncIo {
 			network: network,

View File

@@ -6,7 +6,7 @@ use util::sha3::Hashable;
 use util::rlp::{self, Rlp, RlpStream, View, Stream};
 use util::network::{PeerId, PacketId};
 use util::error::UtilError;
-use client::{BlockChainClient, BlockStatus, BlockNumber, TreeRoute, BlockQueueStatus, BlockChainInfo, ImportResult, QueueStatus};
+use client::{BlockChainClient, BlockStatus, BlockNumber, TreeRoute, BlockQueueStatus, BlockChainInfo, ImportResult};
 use header::Header as BlockHeader;
 use sync::io::SyncIo;
 use sync::chain::ChainSync;
@@ -49,7 +49,7 @@ impl TestBlockChainClient {
 			rlp.append(&header);
 			rlp.append_raw(&rlp::NULL_RLP, 1);
 			rlp.append_raw(uncles.as_raw(), 1);
-			self.import_block(rlp.as_raw());
+			self.import_block(rlp.as_raw()).unwrap();
 		}
 	}
 }
@@ -100,12 +100,12 @@ impl BlockChainClient for TestBlockChainClient {
 		}
 	}
 
-	fn tree_route(&self, _from: &H256, _to: &H256) -> TreeRoute {
-		TreeRoute {
+	fn tree_route(&self, _from: &H256, _to: &H256) -> Option<TreeRoute> {
+		Some(TreeRoute {
 			blocks: Vec::new(),
 			ancestor: H256::new(),
 			index: 0
-		}
+		})
 	}
 
 	fn state_data(&self, _h: &H256) -> Option<Bytes> {
@@ -153,7 +153,7 @@ impl BlockChainClient for TestBlockChainClient {
 		else {
 			self.blocks.insert(header.hash(), b.to_vec());
 		}
-		ImportResult::Queued(QueueStatus::Known)
+		Ok(())
 	}
 
 	fn queue_status(&self) -> BlockQueueStatus {