Remove GetNodeData

parent 32ea4d69a3
commit a8668b371c
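This commit removes support for the eth GetNodeData (0x0d) / NodeData (0x0e)
packet pair: the sync supplier no longer answers node-data requests, the
BlockChainClient::state_data and JournalDB::state APIs are deleted together
with their tests, and the now-unused MAX_NODE_DATA_TO_SEND constant and the
Bytes/DB_PREFIX_LEN imports they required are cleaned up.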
@@ -2278,10 +2278,6 @@ impl BlockChainClient for Client {
         self.chain.read().find_uncle_hashes(hash, MAX_UNCLE_AGE)
     }

-    fn state_data(&self, hash: &H256) -> Option<Bytes> {
-        self.state_db.read().journal_db().state(hash)
-    }
-
     fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
         self.chain.read().block_receipts(hash)
     }
@@ -929,17 +929,6 @@ impl BlockChainClient for TestBlockChainClient
         None
     }

-    // TODO: returns just hashes instead of node state rlp(?)
-    fn state_data(&self, hash: &H256) -> Option<Bytes> {
-        // starts with 'f' ?
-        if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
-            let mut rlp = RlpStream::new();
-            rlp.append(&hash.clone());
-            return Some(rlp.out());
-        }
-        None
-    }
-
     fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
         // starts with 'f' ?
         if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
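(The removed stub only answered for hashes that order above 0xf000...0, the
"starts with 'f'" check, which is why the return_nodes test removed further
down expected exactly one of its three requested hashes to resolve.)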
@@ -322,9 +322,6 @@ pub trait BlockChainClient:
     /// Get all possible uncle hashes for a block.
     fn find_uncles(&self, hash: &H256) -> Option<Vec<H256>>;

-    /// Get latest state node
-    fn state_data(&self, hash: &H256) -> Option<Bytes>;
-
     /// Get block receipts data by block header hash.
     fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts>;

@@ -89,15 +89,6 @@ fn should_return_registrar()
    assert!(U256::from_str(address).is_ok());
 }

-#[test]
-fn returns_state_root_basic() {
-    let client = generate_dummy_client(6);
-    let test_spec = Spec::new_test();
-    let genesis_header = test_spec.genesis_header();
-
-    assert!(client.state_data(genesis_header.state_root()).is_some());
-}
-
 #[test]
 fn imports_good_block() {
     let db = test_helpers::new_db();
@@ -149,7 +149,6 @@ pub const PAR_PROTOCOL_VERSION_3: (u8, u8) = (3, 0x18);

 pub const MAX_BODIES_TO_SEND: usize = 256;
 pub const MAX_HEADERS_TO_SEND: usize = 512;
-pub const MAX_NODE_DATA_TO_SEND: usize = 1024;
 pub const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256;
 const MIN_PEERS_PROPAGATION: usize = 4;
 const MAX_PEERS_PROPAGATION: usize = 128;
@@ -29,15 +29,15 @@ use super::sync_packet::{
     PacketInfo, SyncPacket,
     SyncPacket::{
         BlockBodiesPacket, BlockHeadersPacket, ConsensusDataPacket, GetBlockBodiesPacket,
-        GetBlockHeadersPacket, GetNodeDataPacket, GetReceiptsPacket, GetSnapshotDataPacket,
-        GetSnapshotManifestPacket, NodeDataPacket, ReceiptsPacket, SnapshotDataPacket,
-        SnapshotManifestPacket, StatusPacket, TransactionsPacket,
+        GetBlockHeadersPacket, GetReceiptsPacket, GetSnapshotDataPacket, GetSnapshotManifestPacket,
+        ReceiptsPacket, SnapshotDataPacket, SnapshotManifestPacket, StatusPacket,
+        TransactionsPacket,
     },
 };

 use super::{
     ChainSync, PacketDecodeError, RlpResponseResult, SyncHandler, MAX_BODIES_TO_SEND,
-    MAX_HEADERS_TO_SEND, MAX_NODE_DATA_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND,
+    MAX_HEADERS_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND,
 };

 /// The Chain Sync Supplier: answers requests from peers with available data
@@ -79,13 +79,6 @@ impl SyncSupplier {
                     format!("Error sending receipts: {:?}", e)
                 })
             }
-
-            GetNodeDataPacket => {
-                SyncSupplier::return_rlp(io, &rlp, peer, SyncSupplier::return_node_data, |e| {
-                    format!("Error sending nodes: {:?}", e)
-                })
-            }
-
             GetSnapshotManifestPacket => SyncSupplier::return_rlp(
                 io,
                 &rlp,
@@ -252,38 +245,6 @@ impl SyncSupplier {
         Ok(Some((BlockBodiesPacket.id(), rlp)))
     }

-    /// Respond to GetNodeData request
-    fn return_node_data(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
-        let payload_soft_limit = io.payload_soft_limit();
-        let mut count = r.item_count().unwrap_or(0);
-        trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count);
-        if count == 0 {
-            debug!(target: "sync", "Empty GetNodeData request, ignoring.");
-            return Ok(None);
-        }
-        count = cmp::min(count, MAX_NODE_DATA_TO_SEND);
-        let mut added = 0usize;
-        let mut data = Vec::new();
-        let mut total_bytes = 0;
-        for i in 0..count {
-            if let Some(node) = io.chain().state_data(&r.val_at::<H256>(i)?) {
-                total_bytes += node.len();
-                // Check that the packet won't be oversized
-                if total_bytes > payload_soft_limit {
-                    break;
-                }
-                data.push(node);
-                added += 1;
-            }
-        }
-        trace!(target: "sync", "{} -> GetNodeData: return {} entries", peer_id, added);
-        let mut rlp = RlpStream::new_list(added);
-        for d in data {
-            rlp.append(&d);
-        }
-        Ok(Some((NodeDataPacket.id(), rlp)))
-    }
-
     fn return_receipts(io: &dyn SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult {
         let payload_soft_limit = io.payload_soft_limit();
         let mut count = rlp.item_count().unwrap_or(0);
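For reference, the deleted return_node_data followed the same response-capping
pattern the surviving suppliers such as return_receipts still use: clamp the
requested count, then accumulate entries until the payload soft limit would be
exceeded. A minimal self-contained sketch of that pattern (collect_capped and
its parameters are illustrative names, not from the codebase):

// Illustrative sketch of the capped-response pattern used by the sync
// suppliers: stop adding items once the accumulated payload would exceed
// the peer's soft limit.
fn collect_capped<F>(
    hashes: &[[u8; 32]],
    max_items: usize,
    soft_limit: usize,
    lookup: F,
) -> Vec<Vec<u8>>
where
    F: Fn(&[u8; 32]) -> Option<Vec<u8>>,
{
    let mut data = Vec::new();
    let mut total_bytes = 0usize;
    // Clamp the request, then fill until the size budget runs out,
    // as in the removed code above.
    for hash in hashes.iter().take(max_items) {
        if let Some(item) = lookup(hash) {
            total_bytes += item.len();
            if total_bytes > soft_limit {
                break;
            }
            data.push(item);
        }
    }
    data
}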
@@ -564,50 +525,6 @@ mod test {
         assert!(Rlp::new(&large_result.out()).item_count().unwrap() < large_num_blocks);
     }

-    #[test]
-    fn return_nodes() {
-        let mut client = TestBlockChainClient::new();
-        let queue = RwLock::new(VecDeque::new());
-        let sync = dummy_sync_with_peer(H256::new(), &client);
-        let ss = TestSnapshotService::new();
-        let mut io = TestIo::new(&mut client, &ss, &queue, None);
-
-        let mut node_list = RlpStream::new_list(3);
-        node_list.append(&H256::from(
-            "0000000000000000000000000000000000000000000000005555555555555555",
-        ));
-        node_list.append(&H256::from(
-            "ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa",
-        ));
-        node_list.append(&H256::from(
-            "aff0000000000000000000000000000000000000000000000000000000000000",
-        ));
-
-        let node_request = node_list.out();
-        // it returns rlp ONLY for hashes started with "f"
-        let result = SyncSupplier::return_node_data(&io, &Rlp::new(&node_request.clone()), 0);
-
-        assert!(result.is_ok());
-        let rlp_result = result.unwrap();
-        assert!(rlp_result.is_some());
-
-        // the length of one rlp-encoded hashe
-        let rlp = rlp_result.unwrap().1.out();
-        let rlp = Rlp::new(&rlp);
-        assert_eq!(Ok(1), rlp.item_count());
-
-        io.sender = Some(2usize);
-
-        SyncSupplier::dispatch_packet(
-            &RwLock::new(sync),
-            &mut io,
-            0usize,
-            GetNodeDataPacket.id(),
-            &node_request,
-        );
-        assert_eq!(1, io.packets.len());
-    }
-
     #[test]
     fn return_receipts_empty() {
         let mut client = TestBlockChainClient::new();
@@ -45,8 +45,8 @@ pub enum SyncPacket {
     BlockBodiesPacket = 0x06,
     NewBlockPacket = 0x07,

-    GetNodeDataPacket = 0x0d,
-    NodeDataPacket = 0x0e,
+    //GetNodeDataPacket = 0x0d,
+    //NodeDataPacket = 0x0e,
     GetReceiptsPacket = 0x0f,
     ReceiptsPacket = 0x10,

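Note that the two variants are commented out rather than deleted, so the
retired wire IDs 0x0d and 0x0e stay visibly reserved and the surviving
discriminants (GetReceiptsPacket = 0x0f, ReceiptsPacket = 0x10) keep their
explicit values.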
@@ -82,8 +82,8 @@ impl PacketInfo for SyncPacket {
             | GetBlockBodiesPacket
             | BlockBodiesPacket
             | NewBlockPacket
-            | GetNodeDataPacket
-            | NodeDataPacket
+            //| GetNodeDataPacket
+            //| NodeDataPacket
             | GetReceiptsPacket
             | ReceiptsPacket => ETH_PROTOCOL,

@@ -52,6 +52,7 @@ mod codes {
     pub const REQUEST_REJECTED_LIMIT: i64 = -32041;
     pub const REQUEST_NOT_FOUND: i64 = -32042;
     pub const ENCRYPTION_ERROR: i64 = -32055;
+    #[cfg(any(test, feature = "accounts"))]
     pub const ENCODING_ERROR: i64 = -32058;
     pub const FETCH_ERROR: i64 = -32060;
     pub const NO_PEERS: i64 = -32066;
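The new attribute compiles ENCODING_ERROR only for tests or for builds with
the accounts feature, which keeps builds without that feature free of an
unused-constant warning; it suggests the constant's remaining users are gated
the same way. A minimal sketch of the mechanism (the caller below is a
hypothetical example, not from the codebase):

mod codes {
    // Compiled only under `cargo test` or with `--features accounts`.
    #[cfg(any(test, feature = "accounts"))]
    pub const ENCODING_ERROR: i64 = -32058;
}

// Hypothetical user of the constant, gated identically so the crate still
// builds when the `accounts` feature is disabled.
#[cfg(any(test, feature = "accounts"))]
pub fn encoding_error_code() -> i64 {
    codes::ENCODING_ERROR
}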
@@ -23,10 +23,8 @@ use std::{
 };

 use super::{
-    error_key_already_exists, error_negatively_reference_hash, memory_db::*, DB_PREFIX_LEN,
-    LATEST_ERA_KEY,
+    error_key_already_exists, error_negatively_reference_hash, memory_db::*, LATEST_ERA_KEY,
 };
-use bytes::Bytes;
 use ethereum_types::H256;
 use hash_db::HashDB;
 use keccak_hasher::KeccakHasher;
@@ -205,12 +203,6 @@ impl JournalDB for ArchiveDB {
         self.latest_era
     }

-    fn state(&self, id: &H256) -> Option<Bytes> {
-        self.backing
-            .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN])
-            .map(|b| b.into_vec())
-    }
-
     fn is_pruned(&self) -> bool {
         false
     }
@@ -494,24 +486,6 @@ mod tests {
         }
     }

-    #[test]
-    fn returns_state() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
-
-        let key = {
-            let mut jdb = ArchiveDB::new(shared_db.clone(), None);
-            let key = jdb.insert(b"foo");
-            jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
-            key
-        };
-
-        {
-            let jdb = ArchiveDB::new(shared_db, None);
-            let state = jdb.state(&key);
-            assert!(state.is_some());
-        }
-    }
-
     #[test]
     fn inject() {
         let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
@@ -23,8 +23,7 @@ use std::{
 };

 use super::{
-    error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, DB_PREFIX_LEN,
-    LATEST_ERA_KEY,
+    error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, LATEST_ERA_KEY,
 };
 use bytes::Bytes;
 use ethereum_types::H256;
@@ -425,12 +424,6 @@ impl JournalDB for EarlyMergeDB {
         }
     }

-    fn state(&self, id: &H256) -> Option<Bytes> {
-        self.backing
-            .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN])
-            .map(|b| b.into_vec())
-    }
-
     fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
         // record new commit's details.
         let mut refs = match self.refs.as_ref() {
@@ -23,7 +23,6 @@ use std::{
 };

 use super::{error_negatively_reference_hash, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY};
-use bytes::Bytes;
 use ethereum_types::H256;
 use fastmap::H256FastMap;
 use hash_db::HashDB;
@@ -323,26 +322,6 @@ impl JournalDB for OverlayRecentDB {
         self.journal_overlay.read().earliest_era
     }

-    fn state(&self, key: &H256) -> Option<Bytes> {
-        let journal_overlay = self.journal_overlay.read();
-        let key = to_short_key(key);
-        journal_overlay
-            .backing_overlay
-            .get(&key)
-            .map(|v| v.into_vec())
-            .or_else(|| {
-                journal_overlay
-                    .pending_overlay
-                    .get(&key)
-                    .map(|d| d.clone().into_vec())
-            })
-            .or_else(|| {
-                self.backing
-                    .get_by_prefix(self.column, &key[0..DB_PREFIX_LEN])
-                    .map(|b| b.into_vec())
-            })
-    }
-
     fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
         trace!(target: "journaldb", "entry: #{} ({})", now, id);

@@ -18,8 +18,7 @@

 use std::{collections::HashMap, io, sync::Arc};

-use super::{traits::JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY};
-use bytes::Bytes;
+use super::{traits::JournalDB, LATEST_ERA_KEY};
 use ethereum_types::H256;
 use hash_db::HashDB;
 use heapsize::HeapSizeOf;
@@ -133,12 +132,6 @@ impl JournalDB for RefCountedDB {
         self.latest_era
     }

-    fn state(&self, id: &H256) -> Option<Bytes> {
-        self.backing
-            .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN])
-            .map(|b| b.into_vec())
-    }
-
     fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
         // record new commit's details.
         let mut db_key = DatabaseKey {
@@ -18,7 +18,6 @@

 use std::{io, sync::Arc};

-use bytes::Bytes;
 use ethereum_types::H256;
 use hash_db::{AsHashDB, HashDB};
 use keccak_hasher::KeccakHasher;
@@ -81,9 +80,6 @@ pub trait JournalDB: KeyedHashDB {
     /// from this point onwards.
     fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32>;

-    /// State data query
-    fn state(&self, _id: &H256) -> Option<Bytes>;
-
     /// Whether this database is pruned.
     fn is_pruned(&self) -> bool {
         true
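Every deleted state implementation shared the same shape: truncate the hash to
DB_PREFIX_LEN bytes and return the first backing-store value under that
prefix. After this commit a state node is only reachable by its full hash, for
example through the HashDB supertrait that the JournalDB: KeyedHashDB bound
already implies. A self-contained sketch of the difference, with a BTreeMap
standing in for the backing column (the prefix length value here is an
assumption for illustration, not the real constant):

use std::collections::BTreeMap;

// Assumed stand-in for journaldb's DB_PREFIX_LEN; illustration only.
const DB_PREFIX_LEN: usize = 12;

// Shape of the removed JournalDB::state: return the first value whose key
// shares the leading DB_PREFIX_LEN bytes of the requested hash.
fn state_by_prefix(db: &BTreeMap<Vec<u8>, Vec<u8>>, id: &[u8; 32]) -> Option<Vec<u8>> {
    let prefix = &id[..DB_PREFIX_LEN];
    db.range(prefix.to_vec()..)
        .next()
        .filter(|(k, _)| k.starts_with(prefix))
        .map(|(_, v)| v.clone())
}

// What remains available: an exact lookup by the full 32-byte hash, as a
// HashDB-style get provides.
fn state_by_hash(db: &BTreeMap<Vec<u8>, Vec<u8>>, id: &[u8; 32]) -> Option<Vec<u8>> {
    db.get(&id[..]).cloned()
}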