Remove GetNodeData

rakita 2020-08-24 14:18:03 +02:00 committed by Artem Vorotnikov
parent 32ea4d69a3
commit a8668b371c
13 changed files with 12 additions and 187 deletions

View File

@@ -2278,10 +2278,6 @@ impl BlockChainClient for Client {
         self.chain.read().find_uncle_hashes(hash, MAX_UNCLE_AGE)
     }
 
-    fn state_data(&self, hash: &H256) -> Option<Bytes> {
-        self.state_db.read().journal_db().state(hash)
-    }
-
     fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
         self.chain.read().block_receipts(hash)
     }

View File

@@ -929,17 +929,6 @@ impl BlockChainClient for TestBlockChainClient {
         None
     }
 
-    // TODO: returns just hashes instead of node state rlp(?)
-    fn state_data(&self, hash: &H256) -> Option<Bytes> {
-        // starts with 'f' ?
-        if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
-            let mut rlp = RlpStream::new();
-            rlp.append(&hash.clone());
-            return Some(rlp.out());
-        }
-        None
-    }
-
     fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
         // starts with 'f' ?
         if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
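
The mock above reports state data only for hashes numerically above 0xf000…00 (hex starting with 'f'), echoing the hash back RLP-encoded, so tests can exercise both the hit and the miss path. A self-contained sketch of the same trick (assuming the ethereum-types and rlp crates; `mock_state_data` is a name invented here):

    use ethereum_types::H256;
    use rlp::RlpStream;

    // Pretend state exists only when the hash's first byte is >= 0xf0,
    // i.e. its hex form starts with 'f'; echo the hash back RLP-encoded.
    fn mock_state_data(hash: &H256) -> Option<Vec<u8>> {
        if hash.as_bytes()[0] >= 0xf0 {
            let mut rlp = RlpStream::new();
            rlp.append(hash);
            return Some(rlp.out().to_vec());
        }
        None
    }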

View File

@@ -322,9 +322,6 @@ pub trait BlockChainClient:
     /// Get all possible uncle hashes for a block.
     fn find_uncles(&self, hash: &H256) -> Option<Vec<H256>>;
 
-    /// Get latest state node
-    fn state_data(&self, hash: &H256) -> Option<Bytes>;
-
     /// Get block receipts data by block header hash.
     fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts>;

View File

@@ -89,15 +89,6 @@ fn should_return_registrar() {
     assert!(U256::from_str(address).is_ok());
 }
 
-#[test]
-fn returns_state_root_basic() {
-    let client = generate_dummy_client(6);
-    let test_spec = Spec::new_test();
-    let genesis_header = test_spec.genesis_header();
-
-    assert!(client.state_data(genesis_header.state_root()).is_some());
-}
-
 #[test]
 fn imports_good_block() {
     let db = test_helpers::new_db();

View File

@@ -149,7 +149,6 @@ pub const PAR_PROTOCOL_VERSION_3: (u8, u8) = (3, 0x18);
 pub const MAX_BODIES_TO_SEND: usize = 256;
 pub const MAX_HEADERS_TO_SEND: usize = 512;
-pub const MAX_NODE_DATA_TO_SEND: usize = 1024;
 pub const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256;
 
 const MIN_PEERS_PROPAGATION: usize = 4;
 const MAX_PEERS_PROPAGATION: usize = 128;

View File

@@ -29,15 +29,15 @@ use super::sync_packet::{
     PacketInfo, SyncPacket,
     SyncPacket::{
         BlockBodiesPacket, BlockHeadersPacket, ConsensusDataPacket, GetBlockBodiesPacket,
-        GetBlockHeadersPacket, GetNodeDataPacket, GetReceiptsPacket, GetSnapshotDataPacket,
-        GetSnapshotManifestPacket, NodeDataPacket, ReceiptsPacket, SnapshotDataPacket,
-        SnapshotManifestPacket, StatusPacket, TransactionsPacket,
+        GetBlockHeadersPacket, GetReceiptsPacket, GetSnapshotDataPacket, GetSnapshotManifestPacket,
+        ReceiptsPacket, SnapshotDataPacket, SnapshotManifestPacket, StatusPacket,
+        TransactionsPacket,
     },
 };
 
 use super::{
     ChainSync, PacketDecodeError, RlpResponseResult, SyncHandler, MAX_BODIES_TO_SEND,
-    MAX_HEADERS_TO_SEND, MAX_NODE_DATA_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND,
+    MAX_HEADERS_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND,
 };
 
 /// The Chain Sync Supplier: answers requests from peers with available data
@@ -79,13 +79,6 @@ impl SyncSupplier {
                     format!("Error sending receipts: {:?}", e)
                 })
             }
-
-            GetNodeDataPacket => {
-                SyncSupplier::return_rlp(io, &rlp, peer, SyncSupplier::return_node_data, |e| {
-                    format!("Error sending nodes: {:?}", e)
-                })
-            }
-
             GetSnapshotManifestPacket => SyncSupplier::return_rlp(
                 io,
                 &rlp,
@@ -252,38 +245,6 @@ impl SyncSupplier {
         Ok(Some((BlockBodiesPacket.id(), rlp)))
     }
 
-    /// Respond to GetNodeData request
-    fn return_node_data(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
-        let payload_soft_limit = io.payload_soft_limit();
-        let mut count = r.item_count().unwrap_or(0);
-        trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count);
-        if count == 0 {
-            debug!(target: "sync", "Empty GetNodeData request, ignoring.");
-            return Ok(None);
-        }
-        count = cmp::min(count, MAX_NODE_DATA_TO_SEND);
-        let mut added = 0usize;
-        let mut data = Vec::new();
-        let mut total_bytes = 0;
-        for i in 0..count {
-            if let Some(node) = io.chain().state_data(&r.val_at::<H256>(i)?) {
-                total_bytes += node.len();
-                // Check that the packet won't be oversized
-                if total_bytes > payload_soft_limit {
-                    break;
-                }
-                data.push(node);
-                added += 1;
-            }
-        }
-        trace!(target: "sync", "{} -> GetNodeData: return {} entries", peer_id, added);
-        let mut rlp = RlpStream::new_list(added);
-        for d in data {
-            rlp.append(&d);
-        }
-        Ok(Some((NodeDataPacket.id(), rlp)))
-    }
-
     fn return_receipts(io: &dyn SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult {
         let payload_soft_limit = io.payload_soft_limit();
         let mut count = rlp.item_count().unwrap_or(0);
@@ -564,50 +525,6 @@ mod test {
         assert!(Rlp::new(&large_result.out()).item_count().unwrap() < large_num_blocks);
     }
 
-    #[test]
-    fn return_nodes() {
-        let mut client = TestBlockChainClient::new();
-        let queue = RwLock::new(VecDeque::new());
-        let sync = dummy_sync_with_peer(H256::new(), &client);
-        let ss = TestSnapshotService::new();
-        let mut io = TestIo::new(&mut client, &ss, &queue, None);
-
-        let mut node_list = RlpStream::new_list(3);
-        node_list.append(&H256::from(
-            "0000000000000000000000000000000000000000000000005555555555555555",
-        ));
-        node_list.append(&H256::from(
-            "ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa",
-        ));
-        node_list.append(&H256::from(
-            "aff0000000000000000000000000000000000000000000000000000000000000",
-        ));
-
-        let node_request = node_list.out();
-        // it returns RLP only for hashes starting with "f"
-        let result = SyncSupplier::return_node_data(&io, &Rlp::new(&node_request.clone()), 0);
-        assert!(result.is_ok());
-        let rlp_result = result.unwrap();
-        assert!(rlp_result.is_some());
-
-        // the length of one RLP-encoded hash
-        let rlp = rlp_result.unwrap().1.out();
-        let rlp = Rlp::new(&rlp);
-        assert_eq!(Ok(1), rlp.item_count());
-
-        io.sender = Some(2usize);
-
-        SyncSupplier::dispatch_packet(
-            &RwLock::new(sync),
-            &mut io,
-            0usize,
-            GetNodeDataPacket.id(),
-            &node_request,
-        );
-        assert_eq!(1, io.packets.len());
-    }
-
     #[test]
     fn return_receipts_empty() {
         let mut client = TestBlockChainClient::new();
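
The removed return_node_data above capped its reply twice: at most MAX_NODE_DATA_TO_SEND entries, and no more than payload_soft_limit accumulated bytes, dropping the entry that would cross the budget. A minimal, self-contained sketch of that admission loop (free-standing names invented here):

    /// Dual-limit admission: take at most `max_items` entries and stop
    /// before the accumulated size would exceed `payload_soft_limit`.
    fn cap_reply(items: Vec<Vec<u8>>, max_items: usize, payload_soft_limit: usize) -> Vec<Vec<u8>> {
        let mut total_bytes = 0;
        let mut out = Vec::new();
        for item in items.into_iter().take(max_items) {
            total_bytes += item.len();
            // As in the original: the entry that crosses the soft limit is dropped.
            if total_bytes > payload_soft_limit {
                break;
            }
            out.push(item);
        }
        out
    }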

View File

@@ -45,8 +45,8 @@ pub enum SyncPacket {
     BlockBodiesPacket = 0x06,
     NewBlockPacket = 0x07,
 
-    GetNodeDataPacket = 0x0d,
-    NodeDataPacket = 0x0e,
+    //GetNodeDataPacket = 0x0d,
+    //NodeDataPacket = 0x0e,
 
     GetReceiptsPacket = 0x0f,
     ReceiptsPacket = 0x10,
@@ -82,8 +82,8 @@ impl PacketInfo for SyncPacket {
             | GetBlockBodiesPacket
             | BlockBodiesPacket
             | NewBlockPacket
-            | GetNodeDataPacket
-            | NodeDataPacket
+            //| GetNodeDataPacket
+            //| NodeDataPacket
             | GetReceiptsPacket
             | ReceiptsPacket => ETH_PROTOCOL,
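
Commenting the two variants out rather than deleting them keeps 0x0d/0x0e visibly reserved, and because every variant carries an explicit discriminant, the wire ids of the remaining packets are untouched. An illustrative sketch (names shortened, not the real enum):

    #[repr(u8)]
    enum PacketIdSketch {
        NewBlock = 0x07,
        // 0x0d (GetNodeData) and 0x0e (NodeData) stay reserved.
        GetReceipts = 0x0f,
        Receipts = 0x10,
    }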

View File

@@ -52,6 +52,7 @@ mod codes {
     pub const REQUEST_REJECTED_LIMIT: i64 = -32041;
     pub const REQUEST_NOT_FOUND: i64 = -32042;
     pub const ENCRYPTION_ERROR: i64 = -32055;
+    #[cfg(any(test, feature = "accounts"))]
     pub const ENCODING_ERROR: i64 = -32058;
     pub const FETCH_ERROR: i64 = -32060;
     pub const NO_PEERS: i64 = -32066;
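
This cfg gate is the commit's only code addition here, presumably because ENCODING_ERROR's remaining users are compiled only under test or the accounts feature; gating the constant keeps other builds free of unused-constant warnings. The same pattern in isolation:

    // Exists only under `cargo test` or with the `accounts` feature enabled.
    #[cfg(any(test, feature = "accounts"))]
    pub const ENCODING_ERROR: i64 = -32058;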

View File

@@ -23,10 +23,8 @@ use std::{
 };
 
 use super::{
-    error_key_already_exists, error_negatively_reference_hash, memory_db::*, DB_PREFIX_LEN,
-    LATEST_ERA_KEY,
+    error_key_already_exists, error_negatively_reference_hash, memory_db::*, LATEST_ERA_KEY,
 };
-use bytes::Bytes;
 use ethereum_types::H256;
 use hash_db::HashDB;
 use keccak_hasher::KeccakHasher;
@@ -205,12 +203,6 @@ impl JournalDB for ArchiveDB {
         self.latest_era
     }
 
-    fn state(&self, id: &H256) -> Option<Bytes> {
-        self.backing
-            .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN])
-            .map(|b| b.into_vec())
-    }
-
     fn is_pruned(&self) -> bool {
         false
     }
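
ArchiveDB, EarlyMergeDB, and RefCountedDB (below) all deleted this same state() body: a lookup keyed by only the first DB_PREFIX_LEN bytes of the hash. A self-contained sketch of that prefix lookup, with a HashMap standing in for the kvdb column and an assumed 12-byte prefix length:

    use std::collections::HashMap;

    // Assumed value; stands in for journaldb's DB_PREFIX_LEN short-key length.
    const DB_PREFIX_LEN: usize = 12;

    // Sketch of the removed lookup: match on the hash prefix rather than
    // the full 32-byte key, returning the first value whose key matches.
    fn state(backing: &HashMap<Vec<u8>, Vec<u8>>, id: &[u8; 32]) -> Option<Vec<u8>> {
        let prefix = &id[0..DB_PREFIX_LEN];
        backing
            .iter()
            .find(|(key, _)| key.starts_with(prefix))
            .map(|(_, value)| value.clone())
    }
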
@@ -494,24 +486,6 @@ mod tests {
         }
     }
 
-    #[test]
-    fn returns_state() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
-
-        let key = {
-            let mut jdb = ArchiveDB::new(shared_db.clone(), None);
-            let key = jdb.insert(b"foo");
-            jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
-            key
-        };
-
-        {
-            let jdb = ArchiveDB::new(shared_db, None);
-            let state = jdb.state(&key);
-            assert!(state.is_some());
-        }
-    }
-
     #[test]
     fn inject() {
         let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);

View File

@@ -23,8 +23,7 @@ use std::{
 };
 
 use super::{
-    error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, DB_PREFIX_LEN,
-    LATEST_ERA_KEY,
+    error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, LATEST_ERA_KEY,
 };
 use bytes::Bytes;
 use ethereum_types::H256;
@@ -425,12 +424,6 @@ impl JournalDB for EarlyMergeDB {
         }
     }
 
-    fn state(&self, id: &H256) -> Option<Bytes> {
-        self.backing
-            .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN])
-            .map(|b| b.into_vec())
-    }
-
     fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
         // record new commit's details.
         let mut refs = match self.refs.as_ref() {

View File

@@ -23,7 +23,6 @@ use std::{
 };
 
 use super::{error_negatively_reference_hash, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY};
-use bytes::Bytes;
 use ethereum_types::H256;
 use fastmap::H256FastMap;
 use hash_db::HashDB;
@@ -323,26 +322,6 @@ impl JournalDB for OverlayRecentDB {
         self.journal_overlay.read().earliest_era
     }
 
-    fn state(&self, key: &H256) -> Option<Bytes> {
-        let journal_overlay = self.journal_overlay.read();
-        let key = to_short_key(key);
-        journal_overlay
-            .backing_overlay
-            .get(&key)
-            .map(|v| v.into_vec())
-            .or_else(|| {
-                journal_overlay
-                    .pending_overlay
-                    .get(&key)
-                    .map(|d| d.clone().into_vec())
-            })
-            .or_else(|| {
-                self.backing
-                    .get_by_prefix(self.column, &key[0..DB_PREFIX_LEN])
-                    .map(|b| b.into_vec())
-            })
-    }
-
     fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
         trace!(target: "journaldb", "entry: #{} ({})", now, id);
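
OverlayRecentDB had the only non-trivial state(): the committed overlay first, then the pending overlay, then the backing store, each tier consulted only on a miss above it. A self-contained sketch of that or_else chain (HashMaps standing in for the real overlays):

    use std::collections::HashMap;

    // Three-tier read path: each `or_else` runs only if the tier above missed.
    fn lookup(
        backing_overlay: &HashMap<u64, Vec<u8>>,
        pending_overlay: &HashMap<u64, Vec<u8>>,
        backing: &HashMap<u64, Vec<u8>>,
        key: u64,
    ) -> Option<Vec<u8>> {
        backing_overlay
            .get(&key)
            .or_else(|| pending_overlay.get(&key))
            .or_else(|| backing.get(&key))
            .cloned()
    }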

View File

@@ -18,8 +18,7 @@
 
 use std::{collections::HashMap, io, sync::Arc};
 
-use super::{traits::JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY};
-use bytes::Bytes;
+use super::{traits::JournalDB, LATEST_ERA_KEY};
 use ethereum_types::H256;
 use hash_db::HashDB;
 use heapsize::HeapSizeOf;
@@ -133,12 +132,6 @@ impl JournalDB for RefCountedDB {
         self.latest_era
     }
 
-    fn state(&self, id: &H256) -> Option<Bytes> {
-        self.backing
-            .get_by_prefix(self.column, &id[0..DB_PREFIX_LEN])
-            .map(|b| b.into_vec())
-    }
-
     fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
         // record new commit's details.
         let mut db_key = DatabaseKey {

View File

@@ -18,7 +18,6 @@
 
 use std::{io, sync::Arc};
 
-use bytes::Bytes;
 use ethereum_types::H256;
 use hash_db::{AsHashDB, HashDB};
 use keccak_hasher::KeccakHasher;
@@ -81,9 +80,6 @@ pub trait JournalDB: KeyedHashDB {
     /// from this point onwards.
     fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32>;
 
-    /// State data query
-    fn state(&self, _id: &H256) -> Option<Bytes>;
-
     /// Whether this database is pruned.
     fn is_pruned(&self) -> bool {
         true