Run cargo fix on a few of the worst offenders (#10854)

* Run cargo fix on `vm`

* Run cargo fix on ethcore-db

* Run cargo fix on evm

* Run cargo fix on ethcore-light

* Run cargo fix on journaldb

* Run cargo fix on wasm

* Missing docs

* Run cargo fix on ethcore-sync
Authored by David on 2019-07-09 10:04:20 +02:00; committed by Seun LanLege
parent fdc7b0fdaa, commit f53c3e582c
46 changed files with 369 additions and 368 deletions
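For context on what `cargo fix` did here: it applies the machine-applicable suggestions that rustc emits, and almost all of this diff comes from the `bare_trait_objects` lint, which inserts an explicit `dyn` wherever a trait name is used as a type. A minimal sketch of the rewrite (illustrative only, not code from this repository):

    use std::fmt::Debug;

    // Before the fix this was written `value: &Debug` (a bare trait object,
    // deprecated since the 2018 edition); rustfix inserts the explicit `dyn`.
    fn log(value: &dyn Debug) {
        println!("{:?}", value);
    }

    fn main() {
        log(&42u32);
    }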


@@ -91,13 +91,13 @@ pub trait Key<T> {
 /// Should be used to write value into database.
 pub trait Writable {
 	/// Writes the value into the database.
-	fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]>;
+	fn write<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]>;
 	/// Deletes key from the database.
-	fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]>;
+	fn delete<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]>;
 	/// Writes the value into the database and updates the cache.
-	fn write_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
+	fn write_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
 		K: Key<T, Target = R> + Hash + Eq,
 		T: rlp::Encodable,
 		R: AsRef<[u8]> {
@@ -113,7 +113,7 @@ pub trait Writable {
 	}
 	/// Writes the values into the database and updates the cache.
-	fn extend_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
+	fn extend_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
 		K: Key<T, Target = R> + Hash + Eq,
 		T: rlp::Encodable,
 		R: AsRef<[u8]> {
@@ -134,7 +134,7 @@ pub trait Writable {
 	}
 	/// Writes and removes the values into the database and updates the cache.
-	fn extend_with_option_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
+	fn extend_with_option_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
 		K: Key<T, Target = R> + Hash + Eq,
 		T: rlp::Encodable,
 		R: AsRef<[u8]> {
@@ -165,7 +165,7 @@ pub trait Writable {
 /// Should be used to read values from database.
 pub trait Readable {
 	/// Returns value for given key.
-	fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T> where
+	fn read<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> Option<T> where
 		T: rlp::Decodable,
 		R: AsRef<[u8]>;
@@ -189,7 +189,7 @@ pub trait Readable {
 	}
 	/// Returns true if given value exists.
-	fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: AsRef<[u8]>;
+	fn exists<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]>;
 	/// Returns true if given value exists either in cache or in database.
 	fn exists_with_cache<K, T, R, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> bool where
@@ -208,17 +208,17 @@ pub trait Readable {
 }
 impl Writable for DBTransaction {
-	fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]> {
+	fn write<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]> {
 		self.put(col, key.key().as_ref(), &rlp::encode(value));
 	}
-	fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]> {
+	fn delete<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]> {
 		self.delete(col, key.key().as_ref());
 	}
 }
 impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
-	fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T>
+	fn read<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> Option<T>
 		where T: rlp::Decodable, R: AsRef<[u8]> {
 		self.get(col, key.key().as_ref())
 			.expect(&format!("db get failed, key: {:?}", key.key().as_ref()))
@@ -226,7 +226,7 @@ impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
 	}
-	fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: AsRef<[u8]> {
+	fn exists<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]> {
 		let result = self.get(col, key.key().as_ref());
 		match result {
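Note why only some `Key` mentions above gain `dyn`: it is required only where the trait is used as a type (`key: &dyn Key<T, Target = R>`), never where it appears as a generic bound (`K: Key<T, Target = R>`). A self-contained sketch of the distinction, using a hypothetical `Key` trait rather than the one in this crate:

    trait Key {
        fn key(&self) -> Vec<u8>;
    }

    struct BlockNumber(u64);

    impl Key for BlockNumber {
        fn key(&self) -> Vec<u8> { self.0.to_be_bytes().to_vec() }
    }

    // Trait-object position: the trait itself is the type, so it needs `dyn`.
    fn write_dynamic(key: &dyn Key) -> Vec<u8> { key.key() }

    // Generic-bound position: `K` is the type and `Key` merely bounds it,
    // so no `dyn` is inserted here.
    fn write_generic<K: Key>(key: &K) -> Vec<u8> { key.key() }

    fn main() {
        assert_eq!(write_dynamic(&BlockNumber(7)), write_generic(&BlockNumber(7)));
    }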


@@ -33,7 +33,7 @@ pub struct Factory {
 impl Factory {
 	/// Create fresh instance of VM
 	/// Might choose implementation depending on supplied gas.
-	pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box<Exec> {
+	pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box<dyn Exec> {
 		match self.evm {
 			VMType::Interpreter => if Self::can_fit_in_usize(&params.gas) {
 				Box::new(super::interpreter::Interpreter::<usize>::new(params, self.evm_cache.clone(), schedule, depth))
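The factory's return type is the same rewrite in owned position: a boxed trait object is now spelled `Box<dyn Trait>`. A minimal sketch of the pattern with a hypothetical `Exec` trait (not the real `vm::Exec`):

    trait Exec {
        // `self: Box<Self>` keeps the method object-safe while consuming the VM.
        fn exec(self: Box<Self>) -> u64;
    }

    struct Interpreter { gas: u64 }

    impl Exec for Interpreter {
        fn exec(self: Box<Self>) -> u64 { self.gas }
    }

    // `Box<Exec>` is the pre-2018 spelling; the fix makes the trait object explicit.
    fn create(gas: u64) -> Box<dyn Exec> {
        Box::new(Interpreter { gas })
    }

    fn main() {
        println!("gas left: {}", create(21_000).exec());
    }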


@@ -106,10 +106,10 @@ impl<Gas: evm::CostType> Gasometer<Gas> {
 	/// it will be the amount of gas that the current context provides to the child context.
 	pub fn requirements(
 		&mut self,
-		ext: &vm::Ext,
+		ext: &dyn vm::Ext,
 		instruction: Instruction,
 		info: &InstructionInfo,
-		stack: &Stack<U256>,
+		stack: &dyn Stack<U256>,
 		current_mem_size: usize,
 	) -> vm::Result<InstructionRequirements<Gas>> {
 		let schedule = ext.schedule();
@@ -402,7 +402,7 @@ fn calculate_eip1283_sstore_gas<Gas: evm::CostType>(schedule: &Schedule, origina
 	)
 }
-pub fn handle_eip1283_sstore_clears_refund(ext: &mut vm::Ext, original: &U256, current: &U256, new: &U256) {
+pub fn handle_eip1283_sstore_clears_refund(ext: &mut dyn vm::Ext, original: &U256, current: &U256, new: &U256) {
 	let sstore_clears_schedule = ext.schedule().sstore_refund_gas;
 	if current == new {


@@ -136,7 +136,7 @@ mod tests {
 	#[test]
 	fn test_memory_read_and_write() {
 		// given
-		let mem: &mut Memory = &mut vec![];
+		let mem: &mut dyn Memory = &mut vec![];
 		mem.resize(0x80 + 32);
 		// when
@@ -149,7 +149,7 @@ mod tests {
 	#[test]
 	fn test_memory_read_and_write_byte() {
 		// given
-		let mem: &mut Memory = &mut vec![];
+		let mem: &mut dyn Memory = &mut vec![];
 		mem.resize(32);
 		// when
@@ -163,7 +163,7 @@ mod tests {
 	#[test]
 	fn test_memory_read_slice_and_write_slice() {
-		let mem: &mut Memory = &mut vec![];
+		let mem: &mut dyn Memory = &mut vec![];
 		mem.resize(32);
 		{


@@ -197,7 +197,7 @@ pub struct Interpreter<Cost: CostType> {
 }
 impl<Cost: 'static + CostType> vm::Exec for Interpreter<Cost> {
-	fn exec(mut self: Box<Self>, ext: &mut vm::Ext) -> vm::ExecTrapResult<GasLeft> {
+	fn exec(mut self: Box<Self>, ext: &mut dyn vm::Ext) -> vm::ExecTrapResult<GasLeft> {
 		loop {
 			let result = self.step(ext);
 			match result {
@@ -218,7 +218,7 @@ impl<Cost: 'static + CostType> vm::Exec for Interpreter<Cost> {
 }
 impl<Cost: 'static + CostType> vm::ResumeCall for Interpreter<Cost> {
-	fn resume_call(mut self: Box<Self>, result: MessageCallResult) -> Box<vm::Exec> {
+	fn resume_call(mut self: Box<Self>, result: MessageCallResult) -> Box<dyn vm::Exec> {
 		{
 			let this = &mut *self;
 			let (out_off, out_size) = this.resume_output_range.take().expect("Box<ResumeCall> is obtained from a call opcode; resume_output_range is always set after those opcodes are executed; qed");
@@ -253,7 +253,7 @@ impl<Cost: 'static + CostType> vm::ResumeCall for Interpreter<Cost> {
 }
 impl<Cost: 'static + CostType> vm::ResumeCreate for Interpreter<Cost> {
-	fn resume_create(mut self: Box<Self>, result: ContractCreateResult) -> Box<vm::Exec> {
+	fn resume_create(mut self: Box<Self>, result: ContractCreateResult) -> Box<dyn vm::Exec> {
 		match result {
 			ContractCreateResult::Created(address, gas_left) => {
 				self.stack.push(address_to_u256(address));
@@ -299,7 +299,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 	/// Execute a single step on the VM.
 	#[inline(always)]
-	pub fn step(&mut self, ext: &mut vm::Ext) -> InterpreterResult {
+	pub fn step(&mut self, ext: &mut dyn vm::Ext) -> InterpreterResult {
 		if self.done {
 			return InterpreterResult::Stopped;
 		}
@@ -321,7 +321,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 	/// Inner helper function for step.
 	#[inline(always)]
-	fn step_inner(&mut self, ext: &mut vm::Ext) -> Result<Never, InterpreterResult> {
+	fn step_inner(&mut self, ext: &mut dyn vm::Ext) -> Result<Never, InterpreterResult> {
 		let result = match self.resume_result.take() {
 			Some(result) => result,
 			None => {
@@ -417,7 +417,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 		Err(InterpreterResult::Continue)
 	}
-	fn verify_instruction(&self, ext: &vm::Ext, instruction: Instruction, info: &InstructionInfo) -> vm::Result<()> {
+	fn verify_instruction(&self, ext: &dyn vm::Ext, instruction: Instruction, info: &InstructionInfo) -> vm::Result<()> {
 		let schedule = ext.schedule();
 		if (instruction == instructions::DELEGATECALL && !schedule.have_delegate_call) ||
@@ -452,7 +452,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 	fn mem_written(
 		instruction: Instruction,
-		stack: &Stack<U256>
+		stack: &dyn Stack<U256>
 	) -> Option<(usize, usize)> {
 		let read = |pos| stack.peek(pos).low_u64() as usize;
 		let written = match instruction {
@@ -473,7 +473,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 	fn store_written(
 		instruction: Instruction,
-		stack: &Stack<U256>
+		stack: &dyn Stack<U256>
 	) -> Option<(U256, U256)> {
 		match instruction {
 			instructions::SSTORE => Some((stack.peek(0).clone(), stack.peek(1).clone())),
@@ -484,7 +484,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 	fn exec_instruction(
 		&mut self,
 		gas: Cost,
-		ext: &mut vm::Ext,
+		ext: &mut dyn vm::Ext,
 		instruction: Instruction,
 		provided: Option<Cost>
 	) -> vm::Result<InstructionResult<Cost>> {
@@ -1111,7 +1111,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 	Ok(InstructionResult::Ok)
 }
-fn copy_data_to_memory(mem: &mut Vec<u8>, stack: &mut Stack<U256>, source: &[u8]) {
+fn copy_data_to_memory(mem: &mut Vec<u8>, stack: &mut dyn Stack<U256>, source: &[u8]) {
 	let dest_offset = stack.pop_back();
 	let source_offset = stack.pop_back();
 	let size = stack.pop_back();
@@ -1194,7 +1194,7 @@ mod tests {
 	use vm::tests::{FakeExt, test_finalize};
 	use ethereum_types::Address;
-	fn interpreter(params: ActionParams, ext: &vm::Ext) -> Box<Exec> {
+	fn interpreter(params: ActionParams, ext: &dyn vm::Ext) -> Box<dyn Exec> {
 		Factory::new(VMType::Interpreter, 1).create(params, ext.schedule(), ext.depth())
 	}
@@ -1213,7 +1213,7 @@ mod tests {
 		ext.tracing = true;
 		let gas_left = {
-			let mut vm = interpreter(params, &ext);
+			let vm = interpreter(params, &ext);
 			test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 		};
@@ -1235,7 +1235,7 @@ mod tests {
 		ext.tracing = true;
 		let err = {
-			let mut vm = interpreter(params, &ext);
+			let vm = interpreter(params, &ext);
 			test_finalize(vm.exec(&mut ext).ok().unwrap()).err().unwrap()
 		};
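The `let mut vm` to `let vm` changes in these tests come from a different lint, `unused_mut`: `vm` is only moved into `exec`, never mutated through the binding, so the `mut` was dead. The same fix in isolation (illustrative types only):

    fn consume(s: String) -> usize { s.len() }

    fn main() {
        // Previously `let mut s = ...`; `s` is only moved into `consume`,
        // so rustfix drops the unnecessary `mut`.
        let s = String::from("hello");
        println!("{}", consume(s));
    }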


@@ -38,7 +38,7 @@ fn test_add(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -58,7 +58,7 @@ fn test_sha3(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -78,7 +78,7 @@ fn test_address(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -100,7 +100,7 @@ fn test_origin(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -122,7 +122,7 @@ fn test_sender(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -157,7 +157,7 @@ fn test_extcodecopy(factory: super::Factory) {
 	ext.codes.insert(sender, Arc::new(sender_code));
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -177,7 +177,7 @@ fn test_log_empty(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -209,7 +209,7 @@ fn test_log_sender(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -234,7 +234,7 @@ fn test_blockhash(factory: super::Factory) {
 	ext.blockhashes.insert(U256::zero(), blockhash.clone());
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -256,7 +256,7 @@ fn test_calldataload(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -277,7 +277,7 @@ fn test_author(factory: super::Factory) {
 	ext.info.author = author;
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -297,7 +297,7 @@ fn test_timestamp(factory: super::Factory) {
 	ext.info.timestamp = timestamp;
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -317,7 +317,7 @@ fn test_number(factory: super::Factory) {
 	ext.info.number = number;
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -337,7 +337,7 @@ fn test_difficulty(factory: super::Factory) {
 	ext.info.difficulty = difficulty;
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -357,7 +357,7 @@ fn test_gas_limit(factory: super::Factory) {
 	ext.info.gas_limit = gas_limit;
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -375,7 +375,7 @@ fn test_mul(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -393,7 +393,7 @@ fn test_sub(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -411,7 +411,7 @@ fn test_div(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -429,7 +429,7 @@ fn test_div_zero(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -447,7 +447,7 @@ fn test_mod(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -466,7 +466,7 @@ fn test_smod(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -485,7 +485,7 @@ fn test_sdiv(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -504,7 +504,7 @@ fn test_exp(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -524,7 +524,7 @@ fn test_comparison(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -545,7 +545,7 @@ fn test_signed_comparison(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -566,7 +566,7 @@ fn test_bitops(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -589,7 +589,7 @@ fn test_addmod_mulmod(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -610,7 +610,7 @@ fn test_byte(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -629,7 +629,7 @@ fn test_signextend(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -649,7 +649,7 @@ fn test_badinstruction_int() {
 	let mut ext = FakeExt::new();
 	let err = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap_err()
 	};
@@ -669,7 +669,7 @@ fn test_pop(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -689,7 +689,7 @@ fn test_extops(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -712,7 +712,7 @@ fn test_jumps(factory: super::Factory) {
 	let mut ext = FakeExt::new();
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -740,7 +740,7 @@ fn test_calls(factory: super::Factory) {
 	};
 	let gas_left = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};
@@ -781,7 +781,7 @@ fn test_create_in_staticcall(factory: super::Factory) {
 	ext.is_static = true;
 	let err = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap_err()
 	};
@@ -1049,7 +1049,7 @@ fn push_two_pop_one_constantinople_test(factory: &super::Factory, opcode: u8, pu
 	let mut ext = FakeExt::new_constantinople();
 	let _ = {
-		let mut vm = factory.create(params, ext.schedule(), ext.depth());
+		let vm = factory.create(params, ext.schedule(), ext.depth());
 		test_finalize(vm.exec(&mut ext).ok().unwrap()).unwrap()
 	};


@@ -74,7 +74,7 @@ impl<DB: HashDB<KeccakHasher, DBValue>> CHT<DB> {
 		if block_to_cht_number(num) != Some(self.number) { return Ok(None) }
 		let mut recorder = Recorder::with_depth(from_level);
-		let db: &HashDB<_,_> = &self.db;
+		let db: &dyn HashDB<_,_> = &self.db;
 		let t = TrieDB::new(&db, &self.root)?;
 		t.get_with(&key!(num), &mut recorder)?;
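The annotated `let` in this hunk exists to force an unsizing coercion from the concrete database to a trait-object reference, which is exactly where the new `dyn` lands. The same coercion in miniature, using `std::fmt::Display` for illustration:

    use std::fmt::Display;

    fn main() {
        let n = 5u32;
        // The explicit annotation coerces `&u32` into the trait object `&dyn Display`.
        let d: &dyn Display = &n;
        println!("{}", d);
    }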


@@ -47,8 +47,8 @@ pub trait ChainDataFetcher: Send + Sync + 'static {
 	fn epoch_transition(
 		&self,
 		_hash: H256,
-		_engine: Arc<Engine>,
-		_checker: Arc<StateDependentProof>
+		_engine: Arc<dyn Engine>,
+		_checker: Arc<dyn StateDependentProof>
 	) -> Self::Transition;
 }
@@ -76,8 +76,8 @@ impl ChainDataFetcher for Unavailable {
 	fn epoch_transition(
 		&self,
 		_hash: H256,
-		_engine: Arc<Engine>,
-		_checker: Arc<StateDependentProof>
+		_engine: Arc<dyn Engine>,
+		_checker: Arc<dyn StateDependentProof>
 	) -> Self::Transition {
 		Err("fetching epoch transition proofs unavailable")
 	}


@@ -213,7 +213,7 @@ pub struct HeaderChain {
 	#[ignore_malloc_size_of = "ignored for performance reason"]
 	live_epoch_proofs: RwLock<H256FastMap<EpochTransition>>,
 	#[ignore_malloc_size_of = "ignored for performance reason"]
-	db: Arc<KeyValueDB>,
+	db: Arc<dyn KeyValueDB>,
 	#[ignore_malloc_size_of = "ignored for performance reason"]
 	col: Option<u32>,
 	#[ignore_malloc_size_of = "ignored for performance reason"]
@@ -223,7 +223,7 @@ pub struct HeaderChain {
 impl HeaderChain {
 	/// Create a new header chain given this genesis block and database to read from.
 	pub fn new(
-		db: Arc<KeyValueDB>,
+		db: Arc<dyn KeyValueDB>,
 		col: Option<u32>,
 		spec: &Spec,
 		cache: Arc<Mutex<Cache>>,
@@ -880,7 +880,7 @@ mod tests {
 	use std::time::Duration;
 	use parking_lot::Mutex;
-	fn make_db() -> Arc<KeyValueDB> {
+	fn make_db() -> Arc<dyn KeyValueDB> {
 		Arc::new(kvdb_memorydb::create(0))
 	}


@@ -79,7 +79,7 @@ impl Default for Config {
 /// Trait for interacting with the header chain abstractly.
 pub trait LightChainClient: Send + Sync {
 	/// Adds a new `LightChainNotify` listener.
-	fn add_listener(&self, listener: Weak<LightChainNotify>);
+	fn add_listener(&self, listener: Weak<dyn LightChainNotify>);
 	/// Get chain info.
 	fn chain_info(&self) -> BlockChainInfo;
@@ -104,7 +104,7 @@ pub trait LightChainClient: Send + Sync {
 	fn score(&self, id: BlockId) -> Option<U256>;
 	/// Get an iterator over a block and its ancestry.
-	fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box<Iterator<Item=encoded::Header> + 'a>;
+	fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box<dyn Iterator<Item=encoded::Header> + 'a>;
 	/// Get the signing chain ID.
 	fn signing_chain_id(&self) -> Option<u64>;
@@ -114,7 +114,7 @@ pub trait LightChainClient: Send + Sync {
 	fn env_info(&self, id: BlockId) -> Option<EnvInfo>;
 	/// Get a handle to the consensus engine.
-	fn engine(&self) -> &Arc<Engine>;
+	fn engine(&self) -> &Arc<dyn Engine>;
 	/// Query whether a block is known.
 	fn is_known(&self, hash: &H256) -> bool;
@@ -159,23 +159,23 @@ impl<T: LightChainClient> AsLightClient for T {
 /// Light client implementation.
 pub struct Client<T> {
 	queue: HeaderQueue,
-	engine: Arc<Engine>,
+	engine: Arc<dyn Engine>,
 	chain: HeaderChain,
 	report: RwLock<ClientReport>,
 	import_lock: Mutex<()>,
-	db: Arc<KeyValueDB>,
-	listeners: RwLock<Vec<Weak<LightChainNotify>>>,
+	db: Arc<dyn KeyValueDB>,
+	listeners: RwLock<Vec<Weak<dyn LightChainNotify>>>,
 	fetcher: T,
 	verify_full: bool,
 	/// A closure to call when we want to restart the client
-	exit_handler: Mutex<Option<Box<Fn(String) + 'static + Send>>>,
+	exit_handler: Mutex<Option<Box<dyn Fn(String) + 'static + Send>>>,
 }
 impl<T: ChainDataFetcher> Client<T> {
 	/// Create a new `Client`.
 	pub fn new(
 		config: Config,
-		db: Arc<KeyValueDB>,
+		db: Arc<dyn KeyValueDB>,
 		chain_col: Option<u32>,
 		spec: &Spec,
 		fetcher: T,
@@ -208,7 +208,7 @@ impl<T: ChainDataFetcher> Client<T> {
 	}
 	/// Adds a new `LightChainNotify` listener.
-	pub fn add_listener(&self, listener: Weak<LightChainNotify>) {
+	pub fn add_listener(&self, listener: Weak<dyn LightChainNotify>) {
 		self.listeners.write().push(listener);
 	}
@@ -375,7 +375,7 @@ impl<T: ChainDataFetcher> Client<T> {
 	}
 	/// Get a handle to the verification engine.
-	pub fn engine(&self) -> &Arc<Engine> {
+	pub fn engine(&self) -> &Arc<dyn Engine> {
 		&self.engine
 	}
@@ -416,7 +416,7 @@ impl<T: ChainDataFetcher> Client<T> {
 		Arc::new(v)
 	}
-	fn notify<F: Fn(&LightChainNotify)>(&self, f: F) {
+	fn notify<F: Fn(&dyn LightChainNotify)>(&self, f: F) {
 		for listener in &*self.listeners.read() {
 			if let Some(listener) = listener.upgrade() {
 				f(&*listener)
@@ -536,7 +536,7 @@ impl<T: ChainDataFetcher> Client<T> {
 impl<T: ChainDataFetcher> LightChainClient for Client<T> {
-	fn add_listener(&self, listener: Weak<LightChainNotify>) {
+	fn add_listener(&self, listener: Weak<dyn LightChainNotify>) {
 		Client::add_listener(self, listener)
 	}
@@ -566,7 +566,7 @@ impl<T: ChainDataFetcher> LightChainClient for Client<T> {
 		Client::score(self, id)
 	}
-	fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box<Iterator<Item=encoded::Header> + 'a> {
+	fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box<dyn Iterator<Item=encoded::Header> + 'a> {
 		Box::new(Client::ancestry_iter(self, start))
 	}
@@ -578,7 +578,7 @@ impl<T: ChainDataFetcher> LightChainClient for Client<T> {
 		Client::env_info(self, id)
 	}
-	fn engine(&self) -> &Arc<Engine> {
+	fn engine(&self) -> &Arc<dyn Engine> {
 		Client::engine(self)
 	}
@@ -633,7 +633,7 @@ impl<T: ChainDataFetcher> ::ethcore::client::EngineClient for Client<T> {
 		})
 	}
-	fn as_full_client(&self) -> Option<&::ethcore::client::BlockChainClient> {
+	fn as_full_client(&self) -> Option<&dyn (::ethcore::client::BlockChainClient)> {
 		None
 	}
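Several hunks above touch the client's listener plumbing, where trait objects sit inside `Weak`, `Vec`, and closure bounds. A compact sketch of that observer pattern under a simplified, hypothetical `Notify` trait:

    use std::sync::{Arc, Weak};

    trait Notify {
        fn new_headers(&self, count: usize);
    }

    struct Logger;

    impl Notify for Logger {
        fn new_headers(&self, count: usize) { println!("{} new headers", count); }
    }

    struct Client {
        // `Weak` lets listeners be dropped without explicit unregistration.
        listeners: Vec<Weak<dyn Notify>>,
    }

    impl Client {
        fn add_listener(&mut self, listener: Weak<dyn Notify>) {
            self.listeners.push(listener);
        }

        // The closure parameter is itself a trait object: `Fn(&dyn Notify)`.
        fn notify<F: Fn(&dyn Notify)>(&self, f: F) {
            for listener in &self.listeners {
                if let Some(listener) = listener.upgrade() {
                    f(&*listener);
                }
            }
        }
    }

    fn main() {
        let logger: Arc<dyn Notify> = Arc::new(Logger);
        let mut client = Client { listeners: Vec::new() };
        client.add_listener(Arc::downgrade(&logger));
        client.notify(|l| l.new_headers(3));
    }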


@@ -65,7 +65,7 @@ pub struct Service<T> {
 impl<T: ChainDataFetcher> Service<T> {
 	/// Start the service: initialize I/O workers and client itself.
-	pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, db: Arc<BlockChainDB>, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
+	pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, db: Arc<dyn BlockChainDB>, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
 		let io_service = IoService::<ClientIoMessage>::start().map_err(Error::Io)?;
 		let client = Arc::new(Client::new(config,
 			db.key_value().clone(),
@@ -85,12 +85,12 @@ impl<T: ChainDataFetcher> Service<T> {
 	}
 	/// Set the actor to be notified on certain chain events
-	pub fn add_notify(&self, notify: Arc<LightChainNotify>) {
+	pub fn add_notify(&self, notify: Arc<dyn LightChainNotify>) {
 		self.client.add_listener(Arc::downgrade(&notify));
 	}
 	/// Register an I/O handler on the service.
-	pub fn register_handler(&self, handler: Arc<IoHandler<ClientIoMessage> + Send>) -> Result<(), IoError> {
+	pub fn register_handler(&self, handler: Arc<dyn IoHandler<ClientIoMessage> + Send>) -> Result<(), IoError> {
 		self.io_service.register_handler(handler)
 	}


@@ -116,13 +116,13 @@ pub trait EventContext: BasicContext {
 	fn peer(&self) -> PeerId;
 	/// Treat the event context as a basic context.
-	fn as_basic(&self) -> &BasicContext;
+	fn as_basic(&self) -> &dyn BasicContext;
 }
 /// Basic context.
 pub struct TickCtx<'a> {
 	/// Io context to enable dispatch.
-	pub io: &'a IoContext,
+	pub io: &'a dyn IoContext,
 	/// Protocol implementation.
 	pub proto: &'a LightProtocol,
 }
@@ -153,7 +153,7 @@ impl<'a> BasicContext for TickCtx<'a> {
 /// an io context.
 pub struct Ctx<'a> {
 	/// Io context to enable immediate response to events.
-	pub io: &'a IoContext,
+	pub io: &'a dyn IoContext,
 	/// Protocol implementation.
 	pub proto: &'a LightProtocol,
 	/// Relevant peer for event.
@@ -187,7 +187,7 @@ impl<'a> EventContext for Ctx<'a> {
 		self.peer
 	}
-	fn as_basic(&self) -> &BasicContext {
+	fn as_basic(&self) -> &dyn BasicContext {
 		&*self
 	}
 }


@@ -82,7 +82,7 @@ pub struct LoadDistribution {
 impl LoadDistribution {
 	/// Load rolling samples from the given store.
-	pub fn load(store: &SampleStore) -> Self {
+	pub fn load(store: &dyn SampleStore) -> Self {
 		let mut samples = store.load();
 		for kind_samples in samples.values_mut() {
@@ -133,7 +133,7 @@ impl LoadDistribution {
 	}
 	/// End the current time period. Provide a store to
-	pub fn end_period(&self, store: &SampleStore) {
+	pub fn end_period(&self, store: &dyn SampleStore) {
 		let active_period = self.active_period.read();
 		let mut samples = self.samples.write();


@ -236,25 +236,25 @@ pub trait Handler: Send + Sync {
/// Called when a peer connects. /// Called when a peer connects.
fn on_connect( fn on_connect(
&self, &self,
_ctx: &EventContext, _ctx: &dyn EventContext,
_status: &Status, _status: &Status,
_capabilities: &Capabilities _capabilities: &Capabilities
) -> PeerStatus { PeerStatus::Kept } ) -> PeerStatus { PeerStatus::Kept }
/// Called when a peer disconnects, with a list of unfulfilled request IDs as /// Called when a peer disconnects, with a list of unfulfilled request IDs as
/// of yet. /// of yet.
fn on_disconnect(&self, _ctx: &EventContext, _unfulfilled: &[ReqId]) { } fn on_disconnect(&self, _ctx: &dyn EventContext, _unfulfilled: &[ReqId]) { }
/// Called when a peer makes an announcement. /// Called when a peer makes an announcement.
fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { } fn on_announcement(&self, _ctx: &dyn EventContext, _announcement: &Announcement) { }
/// Called when a peer requests relay of some transactions. /// Called when a peer requests relay of some transactions.
fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { } fn on_transactions(&self, _ctx: &dyn EventContext, _relay: &[UnverifiedTransaction]) { }
/// Called when a peer responds to requests. /// Called when a peer responds to requests.
/// Responses not guaranteed to contain valid data and are not yet checked against /// Responses not guaranteed to contain valid data and are not yet checked against
/// the requests they correspond to. /// the requests they correspond to.
fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) { } fn on_responses(&self, _ctx: &dyn EventContext, _req_id: ReqId, _responses: &[Response]) { }
/// Called when a peer responds with a transaction proof. Each proof is a vector of state items. /// Called when a peer responds with a transaction proof. Each proof is a vector of state items.
fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } fn on_transaction_proof(&self, _ctx: &dyn EventContext, _req_id: ReqId, _state_items: &[DBValue]) { }
/// Called to "tick" the handler periodically. /// Called to "tick" the handler periodically.
fn tick(&self, _ctx: &BasicContext) { } fn tick(&self, _ctx: &dyn BasicContext) { }
/// Called on abort. This signals to handlers that they should clean up /// Called on abort. This signals to handlers that they should clean up
/// and ignore peers. /// and ignore peers.
// TODO: coreresponding `on_activate`? // TODO: coreresponding `on_activate`?
@ -290,7 +290,7 @@ pub struct Params {
/// Initial capabilities. /// Initial capabilities.
pub capabilities: Capabilities, pub capabilities: Capabilities,
/// The sample store (`None` if data shouldn't persist between runs). /// The sample store (`None` if data shouldn't persist between runs).
pub sample_store: Option<Box<SampleStore>>, pub sample_store: Option<Box<dyn SampleStore>>,
} }
/// Type alias for convenience. /// Type alias for convenience.
@ -391,7 +391,7 @@ impl Statistics {
// Locks must be acquired in the order declared, and when holding a read lock // Locks must be acquired in the order declared, and when holding a read lock
// on the peers, only one peer may be held at a time. // on the peers, only one peer may be held at a time.
pub struct LightProtocol { pub struct LightProtocol {
provider: Arc<Provider>, provider: Arc<dyn Provider>,
config: Config, config: Config,
genesis_hash: H256, genesis_hash: H256,
network_id: u64, network_id: u64,
@ -400,16 +400,16 @@ pub struct LightProtocol {
capabilities: RwLock<Capabilities>, capabilities: RwLock<Capabilities>,
flow_params: RwLock<Arc<FlowParams>>, flow_params: RwLock<Arc<FlowParams>>,
free_flow_params: Arc<FlowParams>, free_flow_params: Arc<FlowParams>,
handlers: Vec<Arc<Handler>>, handlers: Vec<Arc<dyn Handler>>,
req_id: AtomicUsize, req_id: AtomicUsize,
sample_store: Box<SampleStore>, sample_store: Box<dyn SampleStore>,
load_distribution: LoadDistribution, load_distribution: LoadDistribution,
statistics: RwLock<Statistics>, statistics: RwLock<Statistics>,
} }
impl LightProtocol { impl LightProtocol {
/// Create a new instance of the protocol manager. /// Create a new instance of the protocol manager.
pub fn new(provider: Arc<Provider>, params: Params) -> Self { pub fn new(provider: Arc<dyn Provider>, params: Params) -> Self {
debug!(target: "pip", "Initializing light protocol handler"); debug!(target: "pip", "Initializing light protocol handler");
let genesis_hash = provider.chain_info().genesis_hash; let genesis_hash = provider.chain_info().genesis_hash;
@ -473,7 +473,7 @@ impl LightProtocol {
/// insufficient credits. Does not check capabilities before sending. /// insufficient credits. Does not check capabilities before sending.
/// On success, returns a request id which can later be coordinated /// On success, returns a request id which can later be coordinated
/// with an event. /// with an event.
pub fn request_from(&self, io: &IoContext, peer_id: PeerId, requests: Requests) -> Result<ReqId, Error> { pub fn request_from(&self, io: &dyn IoContext, peer_id: PeerId, requests: Requests) -> Result<ReqId, Error> {
let peers = self.peers.read(); let peers = self.peers.read();
let peer = match peers.get(&peer_id) { let peer = match peers.get(&peer_id) {
Some(peer) => peer, Some(peer) => peer,
@ -518,7 +518,7 @@ impl LightProtocol {
/// Make an announcement of new chain head and capabilities to all peers. /// Make an announcement of new chain head and capabilities to all peers.
/// The announcement is expected to be valid. /// The announcement is expected to be valid.
pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) { pub fn make_announcement(&self, io: &dyn IoContext, mut announcement: Announcement) {
let mut reorgs_map = HashMap::new(); let mut reorgs_map = HashMap::new();
let now = Instant::now(); let now = Instant::now();
@ -568,7 +568,7 @@ impl LightProtocol {
/// These are intended to be added when the protocol structure /// These are intended to be added when the protocol structure
/// is initialized as a means of customizing its behavior, /// is initialized as a means of customizing its behavior,
/// and dispatching requests immediately upon events. /// and dispatching requests immediately upon events.
pub fn add_handler(&mut self, handler: Arc<Handler>) { pub fn add_handler(&mut self, handler: Arc<dyn Handler>) {
self.handlers.push(handler); self.handlers.push(handler);
} }
@ -635,7 +635,7 @@ impl LightProtocol {
/// Handle a packet using the given io context. /// Handle a packet using the given io context.
/// Packet data is _untrusted_, which means that invalid data won't lead to /// Packet data is _untrusted_, which means that invalid data won't lead to
/// issues. /// issues.
pub fn handle_packet(&self, io: &IoContext, peer: PeerId, packet_id: u8, data: &[u8]) { pub fn handle_packet(&self, io: &dyn IoContext, peer: PeerId, packet_id: u8, data: &[u8]) {
let rlp = Rlp::new(data); let rlp = Rlp::new(data);
trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer); trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer);
@ -664,7 +664,7 @@ impl LightProtocol {
} }
// check timeouts and punish peers. // check timeouts and punish peers.
fn timeout_check(&self, io: &IoContext) { fn timeout_check(&self, io: &dyn IoContext) {
let now = Instant::now(); let now = Instant::now();
// handshake timeout // handshake timeout
@ -706,7 +706,7 @@ impl LightProtocol {
// propagate transactions to relay peers. // propagate transactions to relay peers.
// if we aren't on the mainnet, we just propagate to all relay peers // if we aren't on the mainnet, we just propagate to all relay peers
fn propagate_transactions(&self, io: &IoContext) { fn propagate_transactions(&self, io: &dyn IoContext) {
if self.capabilities.read().tx_relay { return } if self.capabilities.read().tx_relay { return }
let ready_transactions = self.provider.transactions_to_propagate(); let ready_transactions = self.provider.transactions_to_propagate();
@ -746,7 +746,7 @@ impl LightProtocol {
} }
/// called when a peer connects. /// called when a peer connects.
pub fn on_connect(&self, peer: PeerId, io: &IoContext) { pub fn on_connect(&self, peer: PeerId, io: &dyn IoContext) {
let proto_version = match io.protocol_version(peer).ok_or(Error::WrongNetwork) { let proto_version = match io.protocol_version(peer).ok_or(Error::WrongNetwork) {
Ok(pv) => pv, Ok(pv) => pv,
Err(e) => { punish(peer, io, &e); return } Err(e) => { punish(peer, io, &e); return }
@ -788,7 +788,7 @@ impl LightProtocol {
} }
/// called when a peer disconnects. /// called when a peer disconnects.
pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) { pub fn on_disconnect(&self, peer: PeerId, io: &dyn IoContext) {
trace!(target: "pip", "Peer {} disconnecting", peer); trace!(target: "pip", "Peer {} disconnecting", peer);
self.pending_peers.write().remove(&peer); self.pending_peers.write().remove(&peer);
@ -813,8 +813,8 @@ impl LightProtocol {
} }
/// Execute the given closure with a basic context derived from the I/O context. /// Execute the given closure with a basic context derived from the I/O context.
pub fn with_context<F, T>(&self, io: &IoContext, f: F) -> T pub fn with_context<F, T>(&self, io: &dyn IoContext, f: F) -> T
where F: FnOnce(&BasicContext) -> T where F: FnOnce(&dyn BasicContext) -> T
{ {
f(&TickCtx { f(&TickCtx {
io, io,
@ -822,7 +822,7 @@ impl LightProtocol {
}) })
} }
fn tick_handlers(&self, io: &IoContext) { fn tick_handlers(&self, io: &dyn IoContext) {
for handler in &self.handlers { for handler in &self.handlers {
handler.tick(&TickCtx { handler.tick(&TickCtx {
io, io,
@ -831,7 +831,7 @@ impl LightProtocol {
} }
} }
fn begin_new_cost_period(&self, io: &IoContext) { fn begin_new_cost_period(&self, io: &dyn IoContext) {
self.load_distribution.end_period(&*self.sample_store); self.load_distribution.end_period(&*self.sample_store);
let avg_peer_count = self.statistics.read().avg_peer_count(); let avg_peer_count = self.statistics.read().avg_peer_count();
@ -872,7 +872,7 @@ impl LightProtocol {
impl LightProtocol { impl LightProtocol {
// Handle status message from peer. // Handle status message from peer.
fn status(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { fn status(&self, peer: PeerId, io: &dyn IoContext, data: &Rlp) -> Result<(), Error> {
let pending = match self.pending_peers.write().remove(&peer) { let pending = match self.pending_peers.write().remove(&peer) {
Some(pending) => pending, Some(pending) => pending,
None => { None => {
@ -937,7 +937,7 @@ impl LightProtocol {
} }
// Handle an announcement. // Handle an announcement.
fn announcement(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { fn announcement(&self, peer: PeerId, io: &dyn IoContext, data: &Rlp) -> Result<(), Error> {
if !self.peers.read().contains_key(&peer) { if !self.peers.read().contains_key(&peer) {
debug!(target: "pip", "Ignoring announcement from unknown peer"); debug!(target: "pip", "Ignoring announcement from unknown peer");
return Ok(()) return Ok(())
@ -982,7 +982,7 @@ impl LightProtocol {
} }
// Receive requests from a peer. // Receive requests from a peer.
fn request(&self, peer_id: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { fn request(&self, peer_id: PeerId, io: &dyn IoContext, raw: &Rlp) -> Result<(), Error> {
// the maximum amount of requests we'll fill in a single packet. // the maximum amount of requests we'll fill in a single packet.
const MAX_REQUESTS: usize = 256; const MAX_REQUESTS: usize = 256;
@ -1050,7 +1050,7 @@ impl LightProtocol {
} }
// handle a packet with responses. // handle a packet with responses.
fn response(&self, peer: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { fn response(&self, peer: PeerId, io: &dyn IoContext, raw: &Rlp) -> Result<(), Error> {
let (req_id, responses) = { let (req_id, responses) = {
let id_guard = self.pre_verify_response(peer, &raw)?; let id_guard = self.pre_verify_response(peer, &raw)?;
let responses: Vec<Response> = raw.list_at(2)?; let responses: Vec<Response> = raw.list_at(2)?;
@ -1069,7 +1069,7 @@ impl LightProtocol {
} }
// handle an update of request credits parameters. // handle an update of request credits parameters.
fn update_credits(&self, peer_id: PeerId, io: &IoContext, raw: &Rlp) -> Result<(), Error> { fn update_credits(&self, peer_id: PeerId, io: &dyn IoContext, raw: &Rlp) -> Result<(), Error> {
let peers = self.peers.read(); let peers = self.peers.read();
let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?; let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?;
@ -1104,7 +1104,7 @@ impl LightProtocol {
} }
// handle an acknowledgement of request credits update. // handle an acknowledgement of request credits update.
fn acknowledge_update(&self, peer_id: PeerId, _io: &IoContext, _raw: &Rlp) -> Result<(), Error> { fn acknowledge_update(&self, peer_id: PeerId, _io: &dyn IoContext, _raw: &Rlp) -> Result<(), Error> {
let peers = self.peers.read(); let peers = self.peers.read();
let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?; let peer = peers.get(&peer_id).ok_or(Error::UnknownPeer)?;
let mut peer = peer.lock(); let mut peer = peer.lock();
@ -1123,7 +1123,7 @@ impl LightProtocol {
} }
// Receive a set of transactions to relay. // Receive a set of transactions to relay.
fn relay_transactions(&self, peer: PeerId, io: &IoContext, data: &Rlp) -> Result<(), Error> { fn relay_transactions(&self, peer: PeerId, io: &dyn IoContext, data: &Rlp) -> Result<(), Error> {
const MAX_TRANSACTIONS: usize = 256; const MAX_TRANSACTIONS: usize = 256;
let txs: Vec<_> = data.iter() let txs: Vec<_> = data.iter()
@ -1146,7 +1146,7 @@ impl LightProtocol {
} }
// if something went wrong, figure out how much to punish the peer. // if something went wrong, figure out how much to punish the peer.
fn punish(peer: PeerId, io: &IoContext, e: &Error) { fn punish(peer: PeerId, io: &dyn IoContext, e: &Error) {
match e.punishment() { match e.punishment() {
Punishment::None => {} Punishment::None => {}
Punishment::Disconnect => { Punishment::Disconnect => {
@ -1161,7 +1161,7 @@ fn punish(peer: PeerId, io: &IoContext, e: &Error) {
} }
impl NetworkProtocolHandler for LightProtocol { impl NetworkProtocolHandler for LightProtocol {
fn initialize(&self, io: &NetworkContext) { fn initialize(&self, io: &dyn NetworkContext) {
io.register_timer(TIMEOUT, TIMEOUT_INTERVAL) io.register_timer(TIMEOUT, TIMEOUT_INTERVAL)
.expect("Error registering sync timer."); .expect("Error registering sync timer.");
io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL) io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL)
@ -1174,19 +1174,19 @@ impl NetworkProtocolHandler for LightProtocol {
.expect("Error registering statistics timer."); .expect("Error registering statistics timer.");
} }
fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { fn read(&self, io: &dyn NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
self.handle_packet(&io, *peer, packet_id, data); self.handle_packet(&io, *peer, packet_id, data);
} }
fn connected(&self, io: &NetworkContext, peer: &PeerId) { fn connected(&self, io: &dyn NetworkContext, peer: &PeerId) {
self.on_connect(*peer, &io); self.on_connect(*peer, &io);
} }
fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { fn disconnected(&self, io: &dyn NetworkContext, peer: &PeerId) {
self.on_disconnect(*peer, &io); self.on_disconnect(*peer, &io);
} }
fn timeout(&self, io: &NetworkContext, timer: TimerToken) { fn timeout(&self, io: &dyn NetworkContext, timer: TimerToken) {
match timer { match timer {
TIMEOUT => self.timeout_check(&io), TIMEOUT => self.timeout_check(&io),
TICK_TIMEOUT => self.tick_handlers(&io), TICK_TIMEOUT => self.tick_handlers(&io),
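Context for the pattern repeated throughout this diff: since the 2018 edition, Rust lints against naming a trait bare in type position (`&IoContext`) and asks for an explicit `dyn` (`&dyn IoContext`) to mark dynamic dispatch. A minimal before/after sketch, using a stand-in `Io` trait rather than the crate's real `IoContext`:

    trait Io {
        fn send(&self, msg: &str);
    }

    struct Printer;

    impl Io for Printer {
        fn send(&self, msg: &str) { println!("{}", msg); }
    }

    // Edition-2015 spelling, now linted: fn notify(io: &Io)
    // Edition-2018 spelling, as emitted by `cargo fix`:
    fn notify(io: &dyn Io) {
        // `io` is a fat pointer (data + vtable); the call is
        // dispatched at runtime through the vtable.
        io.send("timeout");
    }

    fn main() {
        notify(&Printer);
    }

Since `cargo fix` only applies the compiler's own machine-applicable suggestions, the edit is purely syntactic; the generated code is identical.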


@ -81,7 +81,7 @@ pub mod error {
} }
impl std::error::Error for Error { impl std::error::Error for Error {
fn source(&self) -> Option<&(std::error::Error + 'static)> { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self { match self {
Error::ChannelCanceled(err) => Some(err), Error::ChannelCanceled(err) => Some(err),
_ => None, _ => None,
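The parentheses in `&(dyn std::error::Error + 'static)` group the trait together with its lifetime bound, which is the shape `Error::source` expects. A self-contained sketch of the same pattern, with hypothetical `Canceled`/`MyError` types standing in for the crate's error enum:

    use std::error::Error;
    use std::fmt;

    #[derive(Debug)]
    struct Canceled;

    impl fmt::Display for Canceled {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "channel canceled")
        }
    }

    impl Error for Canceled {}

    #[derive(Debug)]
    enum MyError {
        ChannelCanceled(Canceled),
        Other,
    }

    impl fmt::Display for MyError {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "request failed")
        }
    }

    impl Error for MyError {
        // Same shape as the diff: `dyn`, plus a parenthesised `'static` bound.
        fn source(&self) -> Option<&(dyn Error + 'static)> {
            match self {
                MyError::ChannelCanceled(err) => Some(err),
                _ => None,
            }
        }
    }

    fn main() {
        let e = MyError::ChannelCanceled(Canceled);
        println!("{:?}", e.source().map(|s| s.to_string()));
        assert!(MyError::Other.source().is_none());
    }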
@ -98,7 +98,7 @@ pub trait OnDemandRequester: Send + Sync {
/// Submit a strongly-typed batch of requests. /// Submit a strongly-typed batch of requests.
/// ///
/// Fails if back-references are not coherent. /// Fails if back-references are not coherent.
fn request<T>(&self, ctx: &BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput> fn request<T>(&self, ctx: &dyn BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
where where
T: request::RequestAdapter; T: request::RequestAdapter;
@ -106,7 +106,7 @@ pub trait OnDemandRequester: Send + Sync {
/// ///
/// Fails if back-references are not coherent. /// Fails if back-references are not coherent.
/// The returned vector of responses will correspond to the requests exactly. /// The returned vector of responses will correspond to the requests exactly.
fn request_raw(&self, ctx: &BasicContext, requests: Vec<Request>) fn request_raw(&self, ctx: &dyn BasicContext, requests: Vec<Request>)
-> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>; -> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>;
} }
@ -373,7 +373,7 @@ pub struct OnDemand {
} }
impl OnDemandRequester for OnDemand { impl OnDemandRequester for OnDemand {
fn request_raw(&self, ctx: &BasicContext, requests: Vec<Request>) fn request_raw(&self, ctx: &dyn BasicContext, requests: Vec<Request>)
-> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput> -> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>
{ {
let (sender, receiver) = oneshot::channel(); let (sender, receiver) = oneshot::channel();
@ -429,7 +429,7 @@ impl OnDemandRequester for OnDemand {
Ok(receiver) Ok(receiver)
} }
fn request<T>(&self, ctx: &BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput> fn request<T>(&self, ctx: &dyn BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
where T: request::RequestAdapter where T: request::RequestAdapter
{ {
self.request_raw(ctx, requests.make_requests()).map(|recv| OnResponses { self.request_raw(ctx, requests.make_requests()).map(|recv| OnResponses {
@ -503,7 +503,7 @@ impl OnDemand {
// maybe dispatch pending requests. // maybe dispatch pending requests.
// sometimes // sometimes
fn attempt_dispatch(&self, ctx: &BasicContext) { fn attempt_dispatch(&self, ctx: &dyn BasicContext) {
if !self.no_immediate_dispatch { if !self.no_immediate_dispatch {
self.dispatch_pending(ctx) self.dispatch_pending(ctx)
} }
@ -511,7 +511,7 @@ impl OnDemand {
// dispatch pending requests, and discard those for which the corresponding // dispatch pending requests, and discard those for which the corresponding
// receiver has been dropped. // receiver has been dropped.
fn dispatch_pending(&self, ctx: &BasicContext) { fn dispatch_pending(&self, ctx: &dyn BasicContext) {
if self.pending.read().is_empty() { if self.pending.read().is_empty() {
return return
} }
@ -566,7 +566,7 @@ impl OnDemand {
// submit a pending request set. attempts to answer from cache before // submit a pending request set. attempts to answer from cache before
// going to the network. if complete, sends response and consumes the struct. // going to the network. if complete, sends response and consumes the struct.
fn submit_pending(&self, ctx: &BasicContext, mut pending: Pending) { fn submit_pending(&self, ctx: &dyn BasicContext, mut pending: Pending) {
// answer as many requests from cache as we can, and schedule for dispatch // answer as many requests from cache as we can, and schedule for dispatch
// if incomplete. // if incomplete.
@ -585,7 +585,7 @@ impl OnDemand {
impl Handler for OnDemand { impl Handler for OnDemand {
fn on_connect( fn on_connect(
&self, &self,
ctx: &EventContext, ctx: &dyn EventContext,
status: &Status, status: &Status,
capabilities: &Capabilities capabilities: &Capabilities
) -> PeerStatus { ) -> PeerStatus {
@ -597,7 +597,7 @@ impl Handler for OnDemand {
PeerStatus::Kept PeerStatus::Kept
} }
fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { fn on_disconnect(&self, ctx: &dyn EventContext, unfulfilled: &[ReqId]) {
self.peers.write().remove(&ctx.peer()); self.peers.write().remove(&ctx.peer());
let ctx = ctx.as_basic(); let ctx = ctx.as_basic();
@ -614,7 +614,7 @@ impl Handler for OnDemand {
self.attempt_dispatch(ctx); self.attempt_dispatch(ctx);
} }
fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { fn on_announcement(&self, ctx: &dyn EventContext, announcement: &Announcement) {
{ {
let mut peers = self.peers.write(); let mut peers = self.peers.write();
if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) {
@ -626,7 +626,7 @@ impl Handler for OnDemand {
self.attempt_dispatch(ctx.as_basic()); self.attempt_dispatch(ctx.as_basic());
} }
fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { fn on_responses(&self, ctx: &dyn EventContext, req_id: ReqId, responses: &[basic_request::Response]) {
let mut pending = match self.in_transit.write().remove(&req_id) { let mut pending = match self.in_transit.write().remove(&req_id) {
Some(req) => req, Some(req) => req,
None => return, None => return,
@ -662,7 +662,7 @@ impl Handler for OnDemand {
self.submit_pending(ctx.as_basic(), pending); self.submit_pending(ctx.as_basic(), pending);
} }
fn tick(&self, ctx: &BasicContext) { fn tick(&self, ctx: &dyn BasicContext) {
self.attempt_dispatch(ctx) self.attempt_dispatch(ctx)
} }
} }


@ -1032,7 +1032,7 @@ pub struct TransactionProof {
// TODO: it's not really possible to provide this if the header is unknown. // TODO: it's not really possible to provide this if the header is unknown.
pub env_info: EnvInfo, pub env_info: EnvInfo,
/// Consensus engine. /// Consensus engine.
pub engine: Arc<Engine>, pub engine: Arc<dyn Engine>,
} }
impl TransactionProof { impl TransactionProof {
@ -1075,9 +1075,9 @@ pub struct Signal {
/// Block hash and number to fetch proof for. /// Block hash and number to fetch proof for.
pub hash: H256, pub hash: H256,
/// Consensus engine, used to check the proof. /// Consensus engine, used to check the proof.
pub engine: Arc<Engine>, pub engine: Arc<dyn Engine>,
/// Special checker for the proof. /// Special checker for the proof.
pub proof_check: Arc<StateDependentProof>, pub proof_check: Arc<dyn StateDependentProof>,
} }
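`Arc<Engine>` becoming `Arc<dyn Engine>` is again only spelling: the value was always type-erased behind the `Arc`, so the in-memory representation does not change. A sketch with stand-in `Engine` and `Signal` types:

    use std::sync::Arc;

    trait Engine: Send + Sync {
        fn name(&self) -> &'static str;
    }

    struct NullEngine;

    impl Engine for NullEngine {
        fn name(&self) -> &'static str { "NullEngine" }
    }

    struct Signal {
        // Shared, type-erased engine; `dyn` only makes the erasure
        // explicit in the source text.
        engine: Arc<dyn Engine>,
    }

    fn main() {
        let engine: Arc<dyn Engine> = Arc::new(NullEngine);
        let signal = Signal { engine: Arc::clone(&engine) };
        println!("{}", signal.engine.name());
    }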
impl Signal { impl Signal {


@ -51,7 +51,7 @@ impl EventContext for Context {
} }
} }
fn as_basic(&self) -> &BasicContext { self } fn as_basic(&self) -> &dyn BasicContext { self }
} }
impl BasicContext for Context { impl BasicContext for Context {
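The `as_basic` line above shows why these manual upcast methods exist: at the time, Rust had no implicit trait-object upcasting (e.g. from `&dyn EventContext` to its supertrait `&dyn BasicContext`), but a concrete type implementing both traits can simply return `self`. A stand-in sketch:

    trait Basic {
        fn peer_count(&self) -> usize;
    }

    trait Event: Basic {
        fn peer(&self) -> usize;
        // Manual "upcast": hand out the same object as its supertrait.
        fn as_basic(&self) -> &dyn Basic;
    }

    struct Ctx;

    impl Basic for Ctx {
        fn peer_count(&self) -> usize { 1 }
    }

    impl Event for Ctx {
        fn peer(&self) -> usize { 0 }
        // `self: &Ctx` coerces to `&dyn Basic` because `Ctx: Basic`.
        fn as_basic(&self) -> &dyn Basic { self }
    }

    fn main() {
        let ctx: &dyn Event = &Ctx;
        assert_eq!(ctx.peer(), 0);
        assert_eq!(ctx.as_basic().peer_count(), 1);
    }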


@ -223,7 +223,7 @@ impl From<light_net::Status> for PipProtocolInfo {
/// Only works when IPC is disabled. /// Only works when IPC is disabled.
pub struct AttachedProtocol { pub struct AttachedProtocol {
/// The protocol handler in question. /// The protocol handler in question.
pub handler: Arc<NetworkProtocolHandler + Send + Sync>, pub handler: Arc<dyn NetworkProtocolHandler + Send + Sync>,
/// 3-character ID for the protocol. /// 3-character ID for the protocol.
pub protocol_id: ProtocolId, pub protocol_id: ProtocolId,
/// Supported versions and their packet counts. /// Supported versions and their packet counts.
@ -282,13 +282,13 @@ pub struct Params {
/// Runtime executor /// Runtime executor
pub executor: Executor, pub executor: Executor,
/// Blockchain client. /// Blockchain client.
pub chain: Arc<BlockChainClient>, pub chain: Arc<dyn BlockChainClient>,
/// Snapshot service. /// Snapshot service.
pub snapshot_service: Arc<SnapshotService>, pub snapshot_service: Arc<dyn SnapshotService>,
/// Private tx service. /// Private tx service.
pub private_tx_handler: Option<Arc<PrivateTxHandler>>, pub private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
/// Light data provider. /// Light data provider.
pub provider: Arc<::light::Provider>, pub provider: Arc<dyn (::light::Provider)>,
/// Network layer configuration. /// Network layer configuration.
pub network_config: NetworkConfiguration, pub network_config: NetworkConfiguration,
/// Other protocols to attach. /// Other protocols to attach.
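When auto-trait bounds ride along, `dyn` prefixes the whole bound list, as in `Arc<dyn NetworkProtocolHandler + Send + Sync>` above. A sketch of why the `Send + Sync` part matters for these shared handlers, with a stand-in `Handler` trait:

    use std::sync::Arc;
    use std::thread;

    trait Handler {
        fn on_packet(&self, data: &[u8]);
    }

    struct Logger;

    impl Handler for Logger {
        fn on_packet(&self, data: &[u8]) { println!("{} bytes", data.len()); }
    }

    fn main() {
        // `dyn` covers the whole `Handler + Send + Sync` bound list;
        // the auto traits are what let the handler cross threads.
        let h: Arc<dyn Handler + Send + Sync> = Arc::new(Logger);
        let h2 = Arc::clone(&h);
        thread::spawn(move || h2.on_packet(b"ping")).join().unwrap();
        h.on_packet(b"pong");
    }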
@ -319,7 +319,7 @@ fn light_params(
network_id: u64, network_id: u64,
median_peers: f64, median_peers: f64,
pruning_info: PruningInfo, pruning_info: PruningInfo,
sample_store: Option<Box<SampleStore>>, sample_store: Option<Box<dyn SampleStore>>,
) -> LightParams { ) -> LightParams {
let mut light_params = LightParams { let mut light_params = LightParams {
network_id: network_id, network_id: network_id,
@ -339,7 +339,7 @@ fn light_params(
impl EthSync { impl EthSync {
/// Creates and register protocol with the network service /// Creates and register protocol with the network service
pub fn new(params: Params, connection_filter: Option<Arc<ConnectionFilter>>) -> Result<Arc<EthSync>, Error> { pub fn new(params: Params, connection_filter: Option<Arc<dyn ConnectionFilter>>) -> Result<Arc<EthSync>, Error> {
let pruning_info = params.chain.pruning_info(); let pruning_info = params.chain.pruning_info();
let light_proto = match params.config.serve_light { let light_proto = match params.config.serve_light {
false => None, false => None,
@ -482,9 +482,9 @@ pub(crate) const PRIORITY_TIMER_INTERVAL: Duration = Duration::from_millis(250);
struct SyncProtocolHandler { struct SyncProtocolHandler {
/// Shared blockchain client. /// Shared blockchain client.
chain: Arc<BlockChainClient>, chain: Arc<dyn BlockChainClient>,
/// Shared snapshot service. /// Shared snapshot service.
snapshot_service: Arc<SnapshotService>, snapshot_service: Arc<dyn SnapshotService>,
/// Sync strategy /// Sync strategy
sync: ChainSyncApi, sync: ChainSyncApi,
/// Chain overlay used to cache data such as fork block. /// Chain overlay used to cache data such as fork block.
@ -492,7 +492,7 @@ struct SyncProtocolHandler {
} }
impl NetworkProtocolHandler for SyncProtocolHandler { impl NetworkProtocolHandler for SyncProtocolHandler {
fn initialize(&self, io: &NetworkContext) { fn initialize(&self, io: &dyn NetworkContext) {
if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID { if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID {
io.register_timer(PEERS_TIMER, Duration::from_millis(700)).expect("Error registering peers timer"); io.register_timer(PEERS_TIMER, Duration::from_millis(700)).expect("Error registering peers timer");
io.register_timer(MAINTAIN_SYNC_TIMER, Duration::from_millis(1100)).expect("Error registering sync timer"); io.register_timer(MAINTAIN_SYNC_TIMER, Duration::from_millis(1100)).expect("Error registering sync timer");
@ -503,11 +503,11 @@ impl NetworkProtocolHandler for SyncProtocolHandler {
} }
} }
fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { fn read(&self, io: &dyn NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
self.sync.dispatch_packet(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer, packet_id, data); self.sync.dispatch_packet(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer, packet_id, data);
} }
fn connected(&self, io: &NetworkContext, peer: &PeerId) { fn connected(&self, io: &dyn NetworkContext, peer: &PeerId) {
trace_time!("sync::connected"); trace_time!("sync::connected");
// If warp protocol is supported only allow warp handshake // If warp protocol is supported only allow warp handshake
let warp_protocol = io.protocol_version(WARP_SYNC_PROTOCOL_ID, *peer).unwrap_or(0) != 0; let warp_protocol = io.protocol_version(WARP_SYNC_PROTOCOL_ID, *peer).unwrap_or(0) != 0;
@ -517,14 +517,14 @@ impl NetworkProtocolHandler for SyncProtocolHandler {
} }
} }
fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { fn disconnected(&self, io: &dyn NetworkContext, peer: &PeerId) {
trace_time!("sync::disconnected"); trace_time!("sync::disconnected");
if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID { if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID {
self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer); self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer);
} }
} }
fn timeout(&self, io: &NetworkContext, timer: TimerToken) { fn timeout(&self, io: &dyn NetworkContext, timer: TimerToken) {
trace_time!("sync::timeout"); trace_time!("sync::timeout");
let mut io = NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay); let mut io = NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay);
match timer { match timer {
@ -644,10 +644,10 @@ impl ChainNotify for EthSync {
/// PIP event handler. /// PIP event handler.
/// Simply queues transactions from light client peers. /// Simply queues transactions from light client peers.
struct TxRelay(Arc<BlockChainClient>); struct TxRelay(Arc<dyn BlockChainClient>);
impl LightHandler for TxRelay { impl LightHandler for TxRelay {
fn on_transactions(&self, ctx: &EventContext, relay: &[::types::transaction::UnverifiedTransaction]) { fn on_transactions(&self, ctx: &dyn EventContext, relay: &[::types::transaction::UnverifiedTransaction]) {
trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer());
self.0.queue_transactions(relay.iter().map(|tx| ::rlp::encode(tx)).collect(), ctx.peer()) self.0.queue_transactions(relay.iter().map(|tx| ::rlp::encode(tx)).collect(), ctx.peer())
} }
@ -670,7 +670,7 @@ pub trait ManageNetwork : Send + Sync {
/// Returns the minimum and maximum peers. /// Returns the minimum and maximum peers.
fn num_peers_range(&self) -> RangeInclusive<u32>; fn num_peers_range(&self) -> RangeInclusive<u32>;
/// Get network context for protocol. /// Get network context for protocol.
fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)); fn with_proto_context(&self, proto: ProtocolId, f: &mut dyn FnMut(&dyn NetworkContext));
} }
impl ManageNetwork for EthSync { impl ManageNetwork for EthSync {
@ -711,7 +711,7 @@ impl ManageNetwork for EthSync {
self.network.num_peers_range() self.network.num_peers_range()
} }
fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { fn with_proto_context(&self, proto: ProtocolId, f: &mut dyn FnMut(&dyn NetworkContext)) {
self.network.with_context_eval(proto, f); self.network.with_context_eval(proto, f);
} }
} }
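`&mut dyn FnMut(&dyn NetworkContext)` in `with_proto_context` stacks two trait objects: the callback itself (`dyn FnMut`, since closures have unnameable types) and the context it is handed. A runnable sketch of the same shape with a stand-in `NetCtx` trait:

    trait NetCtx {
        fn subprotocol_name(&self) -> &str;
    }

    struct Ctx;

    impl NetCtx for Ctx {
        fn subprotocol_name(&self) -> &str { "pip" }
    }

    // Two trait objects stacked: the callback (`dyn FnMut`) and the
    // context it receives (`&dyn NetCtx`).
    fn with_context(f: &mut dyn FnMut(&dyn NetCtx)) {
        f(&Ctx);
    }

    fn main() {
        let mut calls = 0;
        with_context(&mut |ctx| {
            calls += 1;
            println!("{}", ctx.subprotocol_name());
        });
        assert_eq!(calls, 1);
    }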
@ -871,7 +871,7 @@ pub trait LightSyncInfo: Send + Sync {
/// Execute a closure with a protocol context. /// Execute a closure with a protocol context.
pub trait LightNetworkDispatcher { pub trait LightNetworkDispatcher {
/// Execute a closure with a protocol context. /// Execute a closure with a protocol context.
fn with_context<F, T>(&self, f: F) -> Option<T> where F: FnOnce(&::light::net::BasicContext) -> T; fn with_context<F, T>(&self, f: F) -> Option<T> where F: FnOnce(&dyn (::light::net::BasicContext)) -> T;
} }
/// Configuration for the light sync. /// Configuration for the light sync.
@ -885,7 +885,7 @@ pub struct LightSyncParams<L> {
/// Subprotocol name. /// Subprotocol name.
pub subprotocol_name: [u8; 3], pub subprotocol_name: [u8; 3],
/// Other handlers to attach. /// Other handlers to attach.
pub handlers: Vec<Arc<LightHandler>>, pub handlers: Vec<Arc<dyn LightHandler>>,
/// Other subprotocols to run. /// Other subprotocols to run.
pub attached_protos: Vec<AttachedProtocol>, pub attached_protos: Vec<AttachedProtocol>,
} }
@ -893,7 +893,7 @@ pub struct LightSyncParams<L> {
/// Service for light synchronization. /// Service for light synchronization.
pub struct LightSync { pub struct LightSync {
proto: Arc<LightProtocol>, proto: Arc<LightProtocol>,
sync: Arc<SyncInfo + Sync + Send>, sync: Arc<dyn SyncInfo + Sync + Send>,
attached_protos: Vec<AttachedProtocol>, attached_protos: Vec<AttachedProtocol>,
network: NetworkService, network: NetworkService,
subprotocol_name: [u8; 3], subprotocol_name: [u8; 3],
@ -947,14 +947,14 @@ impl LightSync {
} }
impl ::std::ops::Deref for LightSync { impl ::std::ops::Deref for LightSync {
type Target = ::light_sync::SyncInfo; type Target = dyn (::light_sync::SyncInfo);
fn deref(&self) -> &Self::Target { &*self.sync } fn deref(&self) -> &Self::Target { &*self.sync }
} }
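`type Target = dyn (::light_sync::SyncInfo)` shows two details at once: a `Deref` target may be an unsized trait object, and `cargo fix` parenthesises trait paths that begin with `::`, presumably because `dyn` is an ordinary identifier under edition 2015 and `dyn ::` would be ambiguous to parse. A stand-in sketch of the `Deref` pattern:

    use std::ops::Deref;
    use std::sync::Arc;

    trait SyncInfo {
        fn is_major_importing(&self) -> bool;
    }

    struct Idle;

    impl SyncInfo for Idle {
        fn is_major_importing(&self) -> bool { false }
    }

    struct LightSync {
        sync: Arc<dyn SyncInfo + Sync + Send>,
    }

    impl Deref for LightSync {
        // An associated type may be unsized, so a bare trait object
        // is a legal `Deref` target; `&*self.sync` drops the auto
        // traits in the coercion, as in the diff above.
        type Target = dyn SyncInfo;
        fn deref(&self) -> &Self::Target { &*self.sync }
    }

    fn main() {
        let s = LightSync { sync: Arc::new(Idle) };
        assert!(!s.is_major_importing());
    }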
impl LightNetworkDispatcher for LightSync { impl LightNetworkDispatcher for LightSync {
fn with_context<F, T>(&self, f: F) -> Option<T> where F: FnOnce(&::light::net::BasicContext) -> T { fn with_context<F, T>(&self, f: F) -> Option<T> where F: FnOnce(&dyn (::light::net::BasicContext)) -> T {
self.network.with_context_eval( self.network.with_context_eval(
self.subprotocol_name, self.subprotocol_name,
move |ctx| self.proto.with_context(&ctx, f), move |ctx| self.proto.with_context(&ctx, f),
@ -1009,7 +1009,7 @@ impl ManageNetwork for LightSync {
self.network.num_peers_range() self.network.num_peers_range()
} }
fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { fn with_proto_context(&self, proto: ProtocolId, f: &mut dyn FnMut(&dyn NetworkContext)) {
self.network.with_context_eval(proto, f); self.network.with_context_eval(proto, f);
} }
} }


@ -230,7 +230,7 @@ impl BlockDownloader {
} }
/// Add new block headers. /// Add new block headers.
pub fn import_headers(&mut self, io: &mut SyncIo, r: &Rlp, expected_hash: H256) -> Result<DownloadAction, BlockDownloaderImportError> { pub fn import_headers(&mut self, io: &mut dyn SyncIo, r: &Rlp, expected_hash: H256) -> Result<DownloadAction, BlockDownloaderImportError> {
let item_count = r.item_count().unwrap_or(0); let item_count = r.item_count().unwrap_or(0);
if self.state == State::Idle { if self.state == State::Idle {
trace_sync!(self, "Ignored unexpected block headers"); trace_sync!(self, "Ignored unexpected block headers");
@ -415,7 +415,7 @@ impl BlockDownloader {
Ok(()) Ok(())
} }
fn start_sync_round(&mut self, io: &mut SyncIo) { fn start_sync_round(&mut self, io: &mut dyn SyncIo) {
self.state = State::ChainHead; self.state = State::ChainHead;
trace_sync!(self, "Starting round (last imported count = {:?}, last started = {}, block = {:?}", self.imported_this_round, self.last_round_start, self.last_imported_block); trace_sync!(self, "Starting round (last imported count = {:?}, last started = {}, block = {:?}", self.imported_this_round, self.last_round_start, self.last_imported_block);
// Check if we need to retract to find the common block. The problem is that the peers still return headers by hash even // Check if we need to retract to find the common block. The problem is that the peers still return headers by hash even
@ -463,7 +463,7 @@ impl BlockDownloader {
} }
/// Find some headers or blocks to download for a peer. /// Find some headers or blocks to download for a peer.
pub fn request_blocks(&mut self, peer_id: PeerId, io: &mut SyncIo, num_active_peers: usize) -> Option<BlockRequest> { pub fn request_blocks(&mut self, peer_id: PeerId, io: &mut dyn SyncIo, num_active_peers: usize) -> Option<BlockRequest> {
match self.state { match self.state {
State::Idle => { State::Idle => {
self.start_sync_round(io); self.start_sync_round(io);
@ -526,7 +526,7 @@ impl BlockDownloader {
/// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import.
/// Returns DownloadAction::Reset if it has imported all the blocks it can and all downloading peers should be reset /// Returns DownloadAction::Reset if it has imported all the blocks it can and all downloading peers should be reset
pub fn collect_blocks(&mut self, io: &mut SyncIo, allow_out_of_order: bool) -> DownloadAction { pub fn collect_blocks(&mut self, io: &mut dyn SyncIo, allow_out_of_order: bool) -> DownloadAction {
let mut download_action = DownloadAction::None; let mut download_action = DownloadAction::None;
let mut imported = HashSet::new(); let mut imported = HashSet::new();
let blocks = self.blocks.drain(); let blocks = self.blocks.drain();
@ -661,7 +661,7 @@ mod tests {
Transaction::default().sign(keypair.secret(), None) Transaction::default().sign(keypair.secret(), None)
} }
fn import_headers(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut SyncIo) -> Result<DownloadAction, BlockDownloaderImportError> { fn import_headers(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut dyn SyncIo) -> Result<DownloadAction, BlockDownloaderImportError> {
let mut stream = RlpStream::new(); let mut stream = RlpStream::new();
stream.append_list(headers); stream.append_list(headers);
let bytes = stream.out(); let bytes = stream.out();
@ -670,7 +670,7 @@ mod tests {
downloader.import_headers(io, &rlp, expected_hash) downloader.import_headers(io, &rlp, expected_hash)
} }
fn import_headers_ok(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut SyncIo) { fn import_headers_ok(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut dyn SyncIo) {
let res = import_headers(headers, downloader, io); let res = import_headers(headers, downloader, io);
assert!(res.is_ok()); assert!(res.is_ok());
} }
@ -812,13 +812,13 @@ mod tests {
let mut parent_hash = H256::zero(); let mut parent_hash = H256::zero();
for i in 0..4 { for i in 0..4 {
// Construct the block body // Construct the block body
let mut uncles = if i > 0 { let uncles = if i > 0 {
encode_list(&[dummy_header(i - 1, H256::random())]) encode_list(&[dummy_header(i - 1, H256::random())])
} else { } else {
::rlp::EMPTY_LIST_RLP.to_vec() ::rlp::EMPTY_LIST_RLP.to_vec()
}; };
let mut txs = encode_list(&[dummy_signed_tx()]); let txs = encode_list(&[dummy_signed_tx()]);
let tx_root = ordered_trie_root(Rlp::new(&txs).iter().map(|r| r.as_raw())); let tx_root = ordered_trie_root(Rlp::new(&txs).iter().map(|r| r.as_raw()));
let mut rlp = RlpStream::new_list(2); let mut rlp = RlpStream::new_list(2);
@ -883,7 +883,7 @@ mod tests {
// //
// The RLP-encoded integers are clearly not receipts, but the BlockDownloader treats // The RLP-encoded integers are clearly not receipts, but the BlockDownloader treats
// all receipts as byte blobs, so it does not matter. // all receipts as byte blobs, so it does not matter.
let mut receipts_rlp = if i < 2 { let receipts_rlp = if i < 2 {
encode_list(&[0u32]) encode_list(&[0u32])
} else { } else {
encode_list(&[i as u32]) encode_list(&[i as u32])
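Not every fix in this file is about trait objects: the dropped `mut` qualifiers above come from the compiler's `unused_mut` lint, since `uncles`, `txs`, and `receipts_rlp` are never reassigned after initialisation. A two-line illustration:

    fn main() {
        // Before the fix: `let mut txs = ...` — warns, because `txs`
        // is never reassigned or mutably borrowed.
        let txs = vec![1u32, 2, 3];
        // `mut` stays only where the binding really changes:
        let mut total = 0;
        for t in &txs {
            total += t;
        }
        assert_eq!(total, 6);
    }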


@ -562,7 +562,7 @@ mod test {
assert!(is_empty(&bc)); assert!(is_empty(&bc));
let client = TestBlockChainClient::new(); let client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Nothing); client.add_blocks(100, EachBlockWith::Nothing);
let hashes = (0 .. 100).map(|i| (&client as &BlockChainClient).block_hash(BlockId::Number(i)).unwrap()).collect(); let hashes = (0 .. 100).map(|i| (&client as &dyn BlockChainClient).block_hash(BlockId::Number(i)).unwrap()).collect();
bc.reset_to(hashes); bc.reset_to(hashes);
assert!(!is_empty(&bc)); assert!(!is_empty(&bc));
bc.clear(); bc.clear();
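The test casts `&client as &dyn BlockChainClient` because a cast target is a type position too, so it needs `dyn` exactly like a declaration does. A stand-in sketch:

    trait Chain {
        fn block_hash(&self, number: u64) -> Option<u64>;
    }

    struct TestChain;

    impl Chain for TestChain {
        fn block_hash(&self, number: u64) -> Option<u64> { Some(number * 31) }
    }

    fn main() {
        let client = TestChain;
        // The cast target needs `dyn` as well:
        let hashes: Vec<_> = (0..3)
            .map(|i| (&client as &dyn Chain).block_hash(i).unwrap())
            .collect();
        assert_eq!(hashes, vec![0, 31, 62]);
    }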
@ -577,7 +577,7 @@ mod test {
let nblocks = 200; let nblocks = 200;
client.add_blocks(nblocks, EachBlockWith::Nothing); client.add_blocks(nblocks, EachBlockWith::Nothing);
let blocks: Vec<_> = (0..nblocks) let blocks: Vec<_> = (0..nblocks)
.map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .map(|i| (&client as &dyn BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner())
.collect(); .collect();
let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect();
let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();
@ -639,7 +639,7 @@ mod test {
let nblocks = 200; let nblocks = 200;
client.add_blocks(nblocks, EachBlockWith::Nothing); client.add_blocks(nblocks, EachBlockWith::Nothing);
let blocks: Vec<_> = (0..nblocks) let blocks: Vec<_> = (0..nblocks)
.map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .map(|i| (&client as &dyn BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner())
.collect(); .collect();
let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect();
let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();
@ -663,7 +663,7 @@ mod test {
let nblocks = 200; let nblocks = 200;
client.add_blocks(nblocks, EachBlockWith::Nothing); client.add_blocks(nblocks, EachBlockWith::Nothing);
let blocks: Vec<_> = (0..nblocks) let blocks: Vec<_> = (0..nblocks)
.map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .map(|i| (&client as &dyn BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner())
.collect(); .collect();
let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect();
let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();


@ -70,7 +70,7 @@ pub struct SyncHandler;
impl SyncHandler { impl SyncHandler {
/// Handle incoming packet from peer /// Handle incoming packet from peer
pub fn on_packet(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { pub fn on_packet(sync: &mut ChainSync, io: &mut dyn SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
let rlp = Rlp::new(data); let rlp = Rlp::new(data);
if let Some(packet_id) = SyncPacket::from_u8(packet_id) { if let Some(packet_id) = SyncPacket::from_u8(packet_id) {
let result = match packet_id { let result = match packet_id {
@ -110,13 +110,13 @@ impl SyncHandler {
} }
/// Called when peer sends us new consensus packet /// Called when peer sends us new consensus packet
pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) { pub fn on_consensus_packet(io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) {
trace!(target: "sync", "Received consensus packet from {:?}", peer_id); trace!(target: "sync", "Received consensus packet from {:?}", peer_id);
io.chain().queue_consensus_message(r.as_raw().to_vec()); io.chain().queue_consensus_message(r.as_raw().to_vec());
} }
/// Called by peer when it is disconnecting /// Called by peer when it is disconnecting
pub fn on_peer_aborting(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { pub fn on_peer_aborting(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId) {
trace!(target: "sync", "== Disconnecting {}: {}", peer_id, io.peer_version(peer_id)); trace!(target: "sync", "== Disconnecting {}: {}", peer_id, io.peer_version(peer_id));
sync.handshaking_peers.remove(&peer_id); sync.handshaking_peers.remove(&peer_id);
if sync.peers.contains_key(&peer_id) { if sync.peers.contains_key(&peer_id) {
@ -142,7 +142,7 @@ impl SyncHandler {
} }
/// Called when a new peer is connected /// Called when a new peer is connected
pub fn on_peer_connected(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId) { pub fn on_peer_connected(sync: &mut ChainSync, io: &mut dyn SyncIo, peer: PeerId) {
trace!(target: "sync", "== Connected {}: {}", peer, io.peer_version(peer)); trace!(target: "sync", "== Connected {}: {}", peer, io.peer_version(peer));
if let Err(e) = sync.send_status(io, peer) { if let Err(e) = sync.send_status(io, peer) {
debug!(target:"sync", "Error sending status request: {:?}", e); debug!(target:"sync", "Error sending status request: {:?}", e);
@ -153,7 +153,7 @@ impl SyncHandler {
} }
/// Called when a peer relays a new block /// Called when a peer relays a new block
pub fn on_peer_new_block(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { pub fn on_peer_new_block(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id); trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id);
return Ok(()); return Ok(());
@ -217,7 +217,7 @@ impl SyncHandler {
} }
/// Handles `NewHashes` packet. Initiates headers download for any unknown hashes. /// Handles `NewHashes` packet. Initiates headers download for any unknown hashes.
pub fn on_peer_new_hashes(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { pub fn on_peer_new_hashes(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id); trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id);
return Ok(()); return Ok(());
@ -288,7 +288,7 @@ impl SyncHandler {
} }
/// Called by peer once it has new block bodies /// Called by peer once it has new block bodies
fn on_peer_block_bodies(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_peer_block_bodies(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
sync.clear_peer_download(peer_id); sync.clear_peer_download(peer_id);
let block_set = sync.peers.get(&peer_id) let block_set = sync.peers.get(&peer_id)
.and_then(|p| p.block_set) .and_then(|p| p.block_set)
@ -332,7 +332,7 @@ impl SyncHandler {
} }
} }
fn on_peer_fork_header(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_peer_fork_header(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
{ {
let peer = sync.peers.get_mut(&peer_id).expect("Is only called when peer is present in peers"); let peer = sync.peers.get_mut(&peer_id).expect("Is only called when peer is present in peers");
peer.asking = PeerAsking::Nothing; peer.asking = PeerAsking::Nothing;
@ -364,7 +364,7 @@ impl SyncHandler {
} }
/// Called by peer once it has new block headers during sync /// Called by peer once it has new block headers during sync
fn on_peer_block_headers(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_peer_block_headers(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
let is_fork_header_request = match sync.peers.get(&peer_id) { let is_fork_header_request = match sync.peers.get(&peer_id) {
Some(peer) if peer.asking == PeerAsking::ForkHeader => true, Some(peer) if peer.asking == PeerAsking::ForkHeader => true,
_ => false, _ => false,
@ -431,7 +431,7 @@ impl SyncHandler {
} }
/// Called by peer once it has new block receipts /// Called by peer once it has new block receipts
fn on_peer_block_receipts(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_peer_block_receipts(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
sync.clear_peer_download(peer_id); sync.clear_peer_download(peer_id);
let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks);
let allowed = sync.peers.get(&peer_id).map(|p| p.is_allowed()).unwrap_or(false); let allowed = sync.peers.get(&peer_id).map(|p| p.is_allowed()).unwrap_or(false);
@ -473,7 +473,7 @@ impl SyncHandler {
} }
/// Called when snapshot manifest is downloaded from a peer. /// Called when snapshot manifest is downloaded from a peer.
fn on_snapshot_manifest(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_snapshot_manifest(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id); trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id);
return Ok(()); return Ok(());
@ -502,7 +502,7 @@ impl SyncHandler {
} }
/// Called when snapshot data is downloaded from a peer. /// Called when snapshot data is downloaded from a peer.
fn on_snapshot_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_snapshot_data(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id); trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id);
return Ok(()); return Ok(());
@ -568,7 +568,7 @@ impl SyncHandler {
} }
/// Called by peer to report status /// Called by peer to report status
fn on_peer_status(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_peer_status(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
sync.handshaking_peers.remove(&peer_id); sync.handshaking_peers.remove(&peer_id);
let protocol_version: u8 = r.val_at(0)?; let protocol_version: u8 = r.val_at(0)?;
let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer_id); let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer_id);
@ -658,7 +658,7 @@ impl SyncHandler {
} }
/// Called when peer sends us new transactions /// Called when peer sends us new transactions
pub fn on_peer_transactions(sync: &ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { pub fn on_peer_transactions(sync: &ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> {
// Accept transactions only when fully synced // Accept transactions only when fully synced
if !io.is_chain_queue_empty() || (sync.state != SyncState::Idle && sync.state != SyncState::NewBlocks) { if !io.is_chain_queue_empty() || (sync.state != SyncState::Idle && sync.state != SyncState::NewBlocks) {
trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id); trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id);
@ -682,7 +682,7 @@ impl SyncHandler {
} }
/// Called when peer sends us signed private transaction packet /// Called when peer sends us signed private transaction packet
fn on_signed_private_transaction(sync: &mut ChainSync, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_signed_private_transaction(sync: &mut ChainSync, _io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id);
return Ok(()); return Ok(());
@ -710,7 +710,7 @@ impl SyncHandler {
} }
/// Called when peer sends us new private transaction packet /// Called when peer sends us new private transaction packet
fn on_private_transaction(sync: &mut ChainSync, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> { fn on_private_transaction(sync: &mut ChainSync, _io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id);
return Ok(()); return Ok(());
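Handlers such as `on_signed_private_transaction` keep the shared `(sync, io, peer_id, r)` shape even when they ignore the IO object; the `_io` underscore silences the unused-variable lint without breaking the uniform signature the packet dispatcher relies on. A stand-in sketch:

    trait SyncIo {
        fn disable_peer(&mut self, peer: usize);
    }

    struct NoopIo;

    impl SyncIo for NoopIo {
        fn disable_peer(&mut self, _peer: usize) {}
    }

    // Every handler keeps the same shape so the dispatcher can treat
    // them uniformly; handlers that never touch the network just
    // prefix the parameter with `_` instead of changing the signature.
    fn on_private_transaction(_io: &mut dyn SyncIo, peer: usize, data: &[u8]) -> Result<(), ()> {
        println!("peer {} sent {} bytes", peer, data.len());
        Ok(())
    }

    fn main() {
        let mut io = NoopIo;
        io.disable_peer(9);
        on_private_transaction(&mut io, 7, b"tx").unwrap();
    }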


@ -386,8 +386,8 @@ impl ChainSyncApi {
/// Creates new `ChainSyncApi` /// Creates new `ChainSyncApi`
pub fn new( pub fn new(
config: SyncConfig, config: SyncConfig,
chain: &BlockChainClient, chain: &dyn BlockChainClient,
private_tx_handler: Option<Arc<PrivateTxHandler>>, private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
priority_tasks: mpsc::Receiver<PriorityTask>, priority_tasks: mpsc::Receiver<PriorityTask>,
) -> Self { ) -> Self {
ChainSyncApi { ChainSyncApi {
@ -421,7 +421,7 @@ impl ChainSyncApi {
} }
/// Dispatch incoming requests and responses /// Dispatch incoming requests and responses
pub fn dispatch_packet(&self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { pub fn dispatch_packet(&self, io: &mut dyn SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
SyncSupplier::dispatch_packet(&self.sync, io, peer, packet_id, data) SyncSupplier::dispatch_packet(&self.sync, io, peer, packet_id, data)
} }
@ -431,7 +431,7 @@ impl ChainSyncApi {
/// ///
/// NOTE This method should only handle stuff that can be canceled and would reach other peers /// NOTE This method should only handle stuff that can be canceled and would reach other peers
/// by other means. /// by other means.
pub fn process_priority_queue(&self, io: &mut SyncIo) { pub fn process_priority_queue(&self, io: &mut dyn SyncIo) {
fn check_deadline(deadline: Instant) -> Option<Duration> { fn check_deadline(deadline: Instant) -> Option<Duration> {
let now = Instant::now(); let now = Instant::now();
if now > deadline { if now > deadline {
@ -503,7 +503,7 @@ impl ChainSyncApi {
// Static methods // Static methods
impl ChainSync { impl ChainSync {
/// creates rlp to send for the tree defined by 'from' and 'to' hashes /// creates rlp to send for the tree defined by 'from' and 'to' hashes
fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> { fn create_new_hashes_rlp(chain: &dyn BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> {
match chain.tree_route(from, to) { match chain.tree_route(from, to) {
Some(route) => { Some(route) => {
let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new); let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new);
@ -538,7 +538,7 @@ impl ChainSync {
} }
/// creates latest block rlp for the given client /// creates latest block rlp for the given client
fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes { fn create_latest_block_rlp(chain: &dyn BlockChainClient) -> Bytes {
Self::create_block_rlp( Self::create_block_rlp(
&chain.block(BlockId::Hash(chain.chain_info().best_block_hash)) &chain.block(BlockId::Hash(chain.chain_info().best_block_hash))
.expect("Best block always exists").into_inner(), .expect("Best block always exists").into_inner(),
@ -547,7 +547,7 @@ impl ChainSync {
} }
/// creates given hash block rlp for the given client /// creates given hash block rlp for the given client
fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes { fn create_new_block_rlp(chain: &dyn BlockChainClient, hash: &H256) -> Bytes {
Self::create_block_rlp( Self::create_block_rlp(
&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(), &chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(),
chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.") chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")
@ -565,7 +565,7 @@ impl ChainSync {
peers peers
} }
fn get_init_state(warp_sync: WarpSync, chain: &BlockChainClient) -> SyncState { fn get_init_state(warp_sync: WarpSync, chain: &dyn BlockChainClient) -> SyncState {
let best_block = chain.chain_info().best_block_number; let best_block = chain.chain_info().best_block_number;
match warp_sync { match warp_sync {
WarpSync::Enabled => SyncState::WaitingPeers, WarpSync::Enabled => SyncState::WaitingPeers,
@ -620,7 +620,7 @@ pub struct ChainSync {
download_old_blocks: bool, download_old_blocks: bool,
/// Shared private tx service. /// Shared private tx service.
#[ignore_malloc_size_of = "arc on dyn trait here seems tricky, ignoring"] #[ignore_malloc_size_of = "arc on dyn trait here seems tricky, ignoring"]
private_tx_handler: Option<Arc<PrivateTxHandler>>, private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
/// Enable warp sync. /// Enable warp sync.
warp_sync: WarpSync, warp_sync: WarpSync,
@ -632,8 +632,8 @@ impl ChainSync {
/// Create a new instance of syncing strategy. /// Create a new instance of syncing strategy.
pub fn new( pub fn new(
config: SyncConfig, config: SyncConfig,
chain: &BlockChainClient, chain: &dyn BlockChainClient,
private_tx_handler: Option<Arc<PrivateTxHandler>>, private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
) -> Self { ) -> Self {
let chain_info = chain.chain_info(); let chain_info = chain.chain_info();
let best_block = chain.chain_info().best_block_number; let best_block = chain.chain_info().best_block_number;
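`ChainSync::new` borrows the client as `&dyn BlockChainClient` just long enough to read chain state, while the private-tx handler is stored owned as `Option<Arc<dyn PrivateTxHandler>>`. A hedged sketch of that borrow/own split, with stand-in traits:

    use std::sync::Arc;

    trait Client {
        fn best_block(&self) -> u64;
    }

    trait TxHandler {
        fn import(&self, raw: &[u8]) -> Result<(), ()>;
    }

    struct Sync {
        start_block: u64,
        tx_handler: Option<Arc<dyn TxHandler>>,
    }

    impl Sync {
        // Borrow the client only long enough to snapshot state;
        // keep shared ownership of the optional handler.
        fn new(chain: &dyn Client, tx_handler: Option<Arc<dyn TxHandler>>) -> Self {
            Sync { start_block: chain.best_block(), tx_handler }
        }
    }

    struct TestClient;

    impl Client for TestClient {
        fn best_block(&self) -> u64 { 42 }
    }

    fn main() {
        let sync = Sync::new(&TestClient, None);
        assert_eq!(sync.start_block, 42);
        assert!(sync.tx_handler.is_none());
    }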
@ -708,7 +708,7 @@ impl ChainSync {
} }
/// Abort all sync activity /// Abort all sync activity
pub fn abort(&mut self, io: &mut SyncIo) { pub fn abort(&mut self, io: &mut dyn SyncIo) {
self.reset_and_continue(io); self.reset_and_continue(io);
self.peers.clear(); self.peers.clear();
} }
@ -738,7 +738,7 @@ impl ChainSync {
/// Reset sync. Clear all downloaded data but keep the queue. /// Reset sync. Clear all downloaded data but keep the queue.
/// Set sync state to the given state or to the initial state if `None` is provided. /// Set sync state to the given state or to the initial state if `None` is provided.
fn reset(&mut self, io: &mut SyncIo, state: Option<SyncState>) { fn reset(&mut self, io: &mut dyn SyncIo, state: Option<SyncState>) {
self.new_blocks.reset(); self.new_blocks.reset();
let chain_info = io.chain().chain_info(); let chain_info = io.chain().chain_info();
for (_, ref mut p) in &mut self.peers { for (_, ref mut p) in &mut self.peers {
@ -760,7 +760,7 @@ impl ChainSync {
} }
/// Restart sync /// Restart sync
pub fn reset_and_continue(&mut self, io: &mut SyncIo) { pub fn reset_and_continue(&mut self, io: &mut dyn SyncIo) {
trace!(target: "sync", "Restarting"); trace!(target: "sync", "Restarting");
if self.state == SyncState::SnapshotData { if self.state == SyncState::SnapshotData {
debug!(target:"sync", "Aborting snapshot restore"); debug!(target:"sync", "Aborting snapshot restore");
@ -773,12 +773,12 @@ impl ChainSync {
/// Remove peer from active peer set. Peer will be reactivated on the next sync /// Remove peer from active peer set. Peer will be reactivated on the next sync
/// round. /// round.
fn deactivate_peer(&mut self, _io: &mut SyncIo, peer_id: PeerId) { fn deactivate_peer(&mut self, _io: &mut dyn SyncIo, peer_id: PeerId) {
trace!(target: "sync", "Deactivating peer {}", peer_id); trace!(target: "sync", "Deactivating peer {}", peer_id);
self.active_peers.remove(&peer_id); self.active_peers.remove(&peer_id);
} }
fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) { fn maybe_start_snapshot_sync(&mut self, io: &mut dyn SyncIo) {
if !self.warp_sync.is_enabled() || io.snapshot_service().supported_versions().is_none() { if !self.warp_sync.is_enabled() || io.snapshot_service().supported_versions().is_none() {
trace!(target: "sync", "Skipping warp sync. Disabled or not supported."); trace!(target: "sync", "Skipping warp sync. Disabled or not supported.");
return; return;
@ -845,7 +845,7 @@ impl ChainSync {
} }
} }
fn start_snapshot_sync(&mut self, io: &mut SyncIo, peers: &[PeerId]) { fn start_snapshot_sync(&mut self, io: &mut dyn SyncIo, peers: &[PeerId]) {
if !self.snapshot.have_manifest() { if !self.snapshot.have_manifest() {
for p in peers { for p in peers {
if self.peers.get(p).map_or(false, |p| p.asking == PeerAsking::Nothing) { if self.peers.get(p).map_or(false, |p| p.asking == PeerAsking::Nothing) {
@ -861,13 +861,13 @@ impl ChainSync {
} }
/// Restart sync disregarding the block queue status. May end up re-downloading up to QUEUE_SIZE blocks /// Restart sync disregarding the block queue status. May end up re-downloading up to QUEUE_SIZE blocks
pub fn restart(&mut self, io: &mut SyncIo) { pub fn restart(&mut self, io: &mut dyn SyncIo) {
self.update_targets(io.chain()); self.update_targets(io.chain());
self.reset_and_continue(io); self.reset_and_continue(io);
} }
/// Update sync after the blockchain has been changed externally. /// Update sync after the blockchain has been changed externally.
pub fn update_targets(&mut self, chain: &BlockChainClient) { pub fn update_targets(&mut self, chain: &dyn BlockChainClient) {
// Do not assume that the block queue/chain still has our last_imported_block // Do not assume that the block queue/chain still has our last_imported_block
let chain = chain.chain_info(); let chain = chain.chain_info();
self.new_blocks = BlockDownloader::new(BlockSet::NewBlocks, &chain.best_block_hash, chain.best_block_number); self.new_blocks = BlockDownloader::new(BlockSet::NewBlocks, &chain.best_block_hash, chain.best_block_number);
@ -887,7 +887,7 @@ impl ChainSync {
} }
/// Resume downloading /// Resume downloading
pub fn continue_sync(&mut self, io: &mut SyncIo) { pub fn continue_sync(&mut self, io: &mut dyn SyncIo) {
if self.state == SyncState::Waiting { if self.state == SyncState::Waiting {
trace!(target: "sync", "Waiting for the block queue"); trace!(target: "sync", "Waiting for the block queue");
} else if self.state == SyncState::SnapshotWaiting { } else if self.state == SyncState::SnapshotWaiting {
@ -928,7 +928,7 @@ impl ChainSync {
} }
/// Called after all blocks have been downloaded /// Called after all blocks have been downloaded
fn complete_sync(&mut self, io: &mut SyncIo) { fn complete_sync(&mut self, io: &mut dyn SyncIo) {
trace!(target: "sync", "Sync complete"); trace!(target: "sync", "Sync complete");
self.reset(io, Some(SyncState::Idle)); self.reset(io, Some(SyncState::Idle));
} }
@ -940,7 +940,7 @@ impl ChainSync {
} }
/// Find something to do for a peer. Called for a new peer or when a peer is done with its task. /// Find something to do for a peer. Called for a new peer or when a peer is done with its task.
fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) { fn sync_peer(&mut self, io: &mut dyn SyncIo, peer_id: PeerId, force: bool) {
if !self.active_peers.contains(&peer_id) { if !self.active_peers.contains(&peer_id) {
trace!(target: "sync", "Skipping deactivated peer {}", peer_id); trace!(target: "sync", "Skipping deactivated peer {}", peer_id);
return; return;
@ -1081,7 +1081,7 @@ impl ChainSync {
} }
/// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import.
fn collect_blocks(&mut self, io: &mut SyncIo, block_set: BlockSet) { fn collect_blocks(&mut self, io: &mut dyn SyncIo, block_set: BlockSet) {
match block_set { match block_set {
BlockSet::NewBlocks => { BlockSet::NewBlocks => {
if self.new_blocks.collect_blocks(io, self.state == SyncState::NewBlocks) == DownloadAction::Reset { if self.new_blocks.collect_blocks(io, self.state == SyncState::NewBlocks) == DownloadAction::Reset {
@ -1138,7 +1138,7 @@ impl ChainSync {
} }
/// Send Status message /// Send Status message
fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), network::Error> { fn send_status(&mut self, io: &mut dyn SyncIo, peer: PeerId) -> Result<(), network::Error> {
let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer); let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer);
let warp_protocol = warp_protocol_version != 0; let warp_protocol = warp_protocol_version != 0;
let private_tx_protocol = warp_protocol_version >= PAR_PROTOCOL_VERSION_3.0; let private_tx_protocol = warp_protocol_version >= PAR_PROTOCOL_VERSION_3.0;
@ -1166,7 +1166,7 @@ impl ChainSync {
io.respond(StatusPacket.id(), packet.out()) io.respond(StatusPacket.id(), packet.out())
} }
pub fn maintain_peers(&mut self, io: &mut SyncIo) { pub fn maintain_peers(&mut self, io: &mut dyn SyncIo) {
let tick = Instant::now(); let tick = Instant::now();
let mut aborting = Vec::new(); let mut aborting = Vec::new();
for (peer_id, peer) in &self.peers { for (peer_id, peer) in &self.peers {
@ -1200,7 +1200,7 @@ impl ChainSync {
} }
} }
fn check_resume(&mut self, io: &mut SyncIo) { fn check_resume(&mut self, io: &mut dyn SyncIo) {
match self.state { match self.state {
SyncState::Waiting if !io.chain().queue_info().is_full() => { SyncState::Waiting if !io.chain().queue_info().is_full() => {
self.set_state(SyncState::Blocks); self.set_state(SyncState::Blocks);
@ -1286,13 +1286,13 @@ impl ChainSync {
} }
/// Maintain other peers. Send out any new blocks and transactions /// Maintain other peers. Send out any new blocks and transactions
pub fn maintain_sync(&mut self, io: &mut SyncIo) { pub fn maintain_sync(&mut self, io: &mut dyn SyncIo) {
self.maybe_start_snapshot_sync(io); self.maybe_start_snapshot_sync(io);
self.check_resume(io); self.check_resume(io);
} }
/// called when block is imported to chain - propagates the blocks and updates transactions sent to peers /// called when block is imported to chain - propagates the blocks and updates transactions sent to peers
pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256], sealed: &[H256], proposed: &[Bytes]) { pub fn chain_new_blocks(&mut self, io: &mut dyn SyncIo, _imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256], sealed: &[H256], proposed: &[Bytes]) {
let queue_info = io.chain().queue_info(); let queue_info = io.chain().queue_info();
let is_syncing = self.status().is_syncing(queue_info); let is_syncing = self.status().is_syncing(queue_info);
@ -1318,22 +1318,22 @@ impl ChainSync {
} }
} }
pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { pub fn on_packet(&mut self, io: &mut dyn SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
SyncHandler::on_packet(self, io, peer, packet_id, data); SyncHandler::on_packet(self, io, peer, packet_id, data);
} }
/// Called by peer when it is disconnecting /// Called by peer when it is disconnecting
pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { pub fn on_peer_aborting(&mut self, io: &mut dyn SyncIo, peer: PeerId) {
SyncHandler::on_peer_aborting(self, io, peer); SyncHandler::on_peer_aborting(self, io, peer);
} }
/// Called when a new peer is connected /// Called when a new peer is connected
pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { pub fn on_peer_connected(&mut self, io: &mut dyn SyncIo, peer: PeerId) {
SyncHandler::on_peer_connected(self, io, peer); SyncHandler::on_peer_connected(self, io, peer);
} }
/// propagates new transactions to all peers /// propagates new transactions to all peers
pub fn propagate_new_transactions(&mut self, io: &mut SyncIo) { pub fn propagate_new_transactions(&mut self, io: &mut dyn SyncIo) {
let deadline = Instant::now() + Duration::from_millis(500); let deadline = Instant::now() + Duration::from_millis(500);
SyncPropagator::propagate_new_transactions(self, io, || { SyncPropagator::propagate_new_transactions(self, io, || {
if deadline > Instant::now() { if deadline > Instant::now() {
@ -1346,12 +1346,12 @@ impl ChainSync {
} }
/// Broadcast consensus message to peers. /// Broadcast consensus message to peers.
pub fn propagate_consensus_packet(&mut self, io: &mut SyncIo, packet: Bytes) { pub fn propagate_consensus_packet(&mut self, io: &mut dyn SyncIo, packet: Bytes) {
SyncPropagator::propagate_consensus_packet(self, io, packet); SyncPropagator::propagate_consensus_packet(self, io, packet);
} }
/// Broadcast private transaction message to peers. /// Broadcast private transaction message to peers.
pub fn propagate_private_transaction(&mut self, io: &mut SyncIo, transaction_hash: H256, packet_id: SyncPacket, packet: Bytes) { pub fn propagate_private_transaction(&mut self, io: &mut dyn SyncIo, transaction_hash: H256, packet_id: SyncPacket, packet: Bytes) {
SyncPropagator::propagate_private_transaction(self, io, transaction_hash, packet_id, packet); SyncPropagator::propagate_private_transaction(self, io, transaction_hash, packet_id, packet);
} }
} }
@ -1455,7 +1455,7 @@ pub mod tests {
assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(0, 0))); assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(0, 0)));
} }
pub fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync { pub fn dummy_sync_with_peer(peer_latest_hash: H256, client: &dyn BlockChainClient) -> ChainSync {
let mut sync = ChainSync::new(SyncConfig::default(), client, None,); let mut sync = ChainSync::new(SyncConfig::default(), client, None,);
insert_dummy_peer(&mut sync, 0, peer_latest_hash); insert_dummy_peer(&mut sync, 0, peer_latest_hash);


@ -51,10 +51,10 @@ pub struct SyncPropagator;
impl SyncPropagator { impl SyncPropagator {
/// propagates latest block to a set of peers /// propagates latest block to a set of peers
pub fn propagate_blocks(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize { pub fn propagate_blocks(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut dyn SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize {
trace!(target: "sync", "Sending NewBlocks to {:?}", peers); trace!(target: "sync", "Sending NewBlocks to {:?}", peers);
let sent = peers.len(); let sent = peers.len();
let mut send_packet = |io: &mut SyncIo, rlp: Bytes| { let mut send_packet = |io: &mut dyn SyncIo, rlp: Bytes| {
for peer_id in peers { for peer_id in peers {
SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone());
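The lint also reaches into closure parameter annotations, hence `|io: &mut dyn SyncIo, rlp: Bytes|` above. A stand-in sketch:

    trait SyncIo {
        fn send(&mut self, peer: usize, bytes: &[u8]);
    }

    struct CountingIo(usize);

    impl SyncIo for CountingIo {
        fn send(&mut self, _peer: usize, _bytes: &[u8]) { self.0 += 1; }
    }

    fn main() {
        let peers = [1usize, 2, 3];
        let mut sent = 0;
        // Explicit parameter annotations inside closures follow the
        // same rule as any other type position:
        let mut send_packet = |io: &mut dyn SyncIo, rlp: Vec<u8>| {
            for peer in &peers {
                io.send(*peer, &rlp);
                sent += 1;
            }
        };
        let mut io = CountingIo(0);
        send_packet(&mut io, vec![0xc0]);
        assert_eq!((io.0, sent), (3, 3));
    }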
@ -78,7 +78,7 @@ impl SyncPropagator {
} }
/// propagates new known hashes to all peers /// propagates new known hashes to all peers
pub fn propagate_new_hashes(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[PeerId]) -> usize { pub fn propagate_new_hashes(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut dyn SyncIo, peers: &[PeerId]) -> usize {
trace!(target: "sync", "Sending NewHashes to {:?}", peers); trace!(target: "sync", "Sending NewHashes to {:?}", peers);
let last_parent = *io.chain().best_block_header().parent_hash(); let last_parent = *io.chain().best_block_header().parent_hash();
let best_block_hash = chain_info.best_block_hash; let best_block_hash = chain_info.best_block_hash;
@ -98,7 +98,7 @@ impl SyncPropagator {
} }
/// propagates new transactions to all peers /// propagates new transactions to all peers
pub fn propagate_new_transactions<F: FnMut() -> bool>(sync: &mut ChainSync, io: &mut SyncIo, mut should_continue: F) -> usize { pub fn propagate_new_transactions<F: FnMut() -> bool>(sync: &mut ChainSync, io: &mut dyn SyncIo, mut should_continue: F) -> usize {
// Early out if nobody to send to. // Early out if nobody to send to.
if sync.peers.is_empty() { if sync.peers.is_empty() {
return 0; return 0;
@ -141,7 +141,7 @@ impl SyncPropagator {
fn propagate_transactions_to_peers<F: FnMut() -> bool>( fn propagate_transactions_to_peers<F: FnMut() -> bool>(
sync: &mut ChainSync, sync: &mut ChainSync,
io: &mut SyncIo, io: &mut dyn SyncIo,
peers: Vec<PeerId>, peers: Vec<PeerId>,
transactions: Vec<&SignedTransaction>, transactions: Vec<&SignedTransaction>,
mut should_continue: F, mut should_continue: F,
@ -158,7 +158,7 @@ impl SyncPropagator {
// Clear old transactions from stats // Clear old transactions from stats
sync.transactions_stats.retain(&all_transactions_hashes); sync.transactions_stats.retain(&all_transactions_hashes);
let send_packet = |io: &mut SyncIo, peer_id: PeerId, sent: usize, rlp: Bytes| { let send_packet = |io: &mut dyn SyncIo, peer_id: PeerId, sent: usize, rlp: Bytes| {
let size = rlp.len(); let size = rlp.len();
SyncPropagator::send_packet(io, peer_id, TransactionsPacket, rlp); SyncPropagator::send_packet(io, peer_id, TransactionsPacket, rlp);
trace!(target: "sync", "{:02} <- Transactions ({} entries; {} bytes)", peer_id, sent, size); trace!(target: "sync", "{:02} <- Transactions ({} entries; {} bytes)", peer_id, sent, size);
@ -249,7 +249,7 @@ impl SyncPropagator {
sent_to_peers sent_to_peers
} }
pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut SyncIo, sealed: &[H256]) { pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut dyn SyncIo, sealed: &[H256]) {
let chain_info = io.chain().chain_info(); let chain_info = io.chain().chain_info();
if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
let peers = sync.get_lagging_peers(&chain_info); let peers = sync.get_lagging_peers(&chain_info);
@ -270,7 +270,7 @@ impl SyncPropagator {
} }
/// Distribute valid proposed blocks to subset of current peers. /// Distribute valid proposed blocks to subset of current peers.
pub fn propagate_proposed_blocks(sync: &mut ChainSync, io: &mut SyncIo, proposed: &[Bytes]) { pub fn propagate_proposed_blocks(sync: &mut ChainSync, io: &mut dyn SyncIo, proposed: &[Bytes]) {
let peers = sync.get_consensus_peers(); let peers = sync.get_consensus_peers();
trace!(target: "sync", "Sending proposed blocks to {:?}", peers); trace!(target: "sync", "Sending proposed blocks to {:?}", peers);
for block in proposed { for block in proposed {
@ -285,7 +285,7 @@ impl SyncPropagator {
} }
/// Broadcast consensus message to peers. /// Broadcast consensus message to peers.
pub fn propagate_consensus_packet(sync: &mut ChainSync, io: &mut SyncIo, packet: Bytes) { pub fn propagate_consensus_packet(sync: &mut ChainSync, io: &mut dyn SyncIo, packet: Bytes) {
let lucky_peers = ChainSync::select_random_peers(&sync.get_consensus_peers()); let lucky_peers = ChainSync::select_random_peers(&sync.get_consensus_peers());
trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers); trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers);
for peer_id in lucky_peers { for peer_id in lucky_peers {
@ -294,7 +294,7 @@ impl SyncPropagator {
} }
/// Broadcast private transaction message to peers. /// Broadcast private transaction message to peers.
pub fn propagate_private_transaction(sync: &mut ChainSync, io: &mut SyncIo, transaction_hash: H256, packet_id: SyncPacket, packet: Bytes) { pub fn propagate_private_transaction(sync: &mut ChainSync, io: &mut dyn SyncIo, transaction_hash: H256, packet_id: SyncPacket, packet: Bytes) {
let lucky_peers = ChainSync::select_random_peers(&sync.get_private_transaction_peers(&transaction_hash)); let lucky_peers = ChainSync::select_random_peers(&sync.get_private_transaction_peers(&transaction_hash));
if lucky_peers.is_empty() { if lucky_peers.is_empty() {
error!(target: "privatetx", "Cannot propagate the packet, no peers with private tx enabled connected"); error!(target: "privatetx", "Cannot propagate the packet, no peers with private tx enabled connected");
@ -325,7 +325,7 @@ impl SyncPropagator {
} }
/// Generic packet sender /// Generic packet sender
pub fn send_packet(sync: &mut SyncIo, peer_id: PeerId, packet_id: SyncPacket, packet: Bytes) { pub fn send_packet(sync: &mut dyn SyncIo, peer_id: PeerId, packet_id: SyncPacket, packet: Bytes) {
if let Err(e) = sync.send(peer_id, packet_id, packet) { if let Err(e) = sync.send(peer_id, packet_id, packet) {
debug!(target:"sync", "Error sending packet: {:?}", e); debug!(target:"sync", "Error sending packet: {:?}", e);
sync.disconnect_peer(peer_id); sync.disconnect_peer(peer_id);


@@ -43,7 +43,7 @@ pub struct SyncRequester;
impl SyncRequester {
/// Perform block download request
-pub fn request_blocks(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, request: BlockRequest, block_set: BlockSet) {
+pub fn request_blocks(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, request: BlockRequest, block_set: BlockSet) {
match request {
BlockRequest::Headers { start, count, skip } => {
SyncRequester::request_headers_by_hash(sync, io, peer_id, &start, count, skip, false, block_set);
@@ -58,7 +58,7 @@ impl SyncRequester {
}
/// Request block bodies from a peer
-fn request_bodies(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, hashes: Vec<H256>, set: BlockSet) {
+fn request_bodies(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, hashes: Vec<H256>, set: BlockSet) {
let mut rlp = RlpStream::new_list(hashes.len());
trace!(target: "sync", "{} <- GetBlockBodies: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set);
for h in &hashes {
@@ -71,7 +71,7 @@ impl SyncRequester {
}
/// Request headers from a peer by block number
-pub fn request_fork_header(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, n: BlockNumber) {
+pub fn request_fork_header(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, n: BlockNumber) {
trace!(target: "sync", "{} <- GetForkHeader: at {}", peer_id, n);
let mut rlp = RlpStream::new_list(4);
rlp.append(&n);
@@ -82,7 +82,7 @@ impl SyncRequester {
}
/// Find some headers or blocks to download for a peer.
-pub fn request_snapshot_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) {
+pub fn request_snapshot_data(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId) {
// find chunk data to download
if let Some(hash) = sync.snapshot.needed_chunk() {
if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
@@ -93,14 +93,14 @@ impl SyncRequester {
}
/// Request snapshot manifest from a peer.
-pub fn request_snapshot_manifest(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) {
+pub fn request_snapshot_manifest(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId) {
trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id);
let rlp = RlpStream::new_list(0);
SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotManifest, GetSnapshotManifestPacket, rlp.out());
}
/// Request headers from a peer by block hash
-fn request_headers_by_hash(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, h: &H256, count: u64, skip: u64, reverse: bool, set: BlockSet) {
+fn request_headers_by_hash(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, h: &H256, count: u64, skip: u64, reverse: bool, set: BlockSet) {
trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}, set = {:?}", peer_id, count, h, set);
let mut rlp = RlpStream::new_list(4);
rlp.append(h);
@@ -114,7 +114,7 @@ impl SyncRequester {
}
/// Request block receipts from a peer
-fn request_receipts(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, hashes: Vec<H256>, set: BlockSet) {
+fn request_receipts(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, hashes: Vec<H256>, set: BlockSet) {
let mut rlp = RlpStream::new_list(hashes.len());
trace!(target: "sync", "{} <- GetBlockReceipts: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set);
for h in &hashes {
@@ -127,7 +127,7 @@ impl SyncRequester {
}
/// Request snapshot chunk from a peer.
-fn request_snapshot_chunk(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, chunk: &H256) {
+fn request_snapshot_chunk(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, chunk: &H256) {
trace!(target: "sync", "{} <- GetSnapshotData {:?}", peer_id, chunk);
let mut rlp = RlpStream::new_list(1);
rlp.append(chunk);
@@ -135,7 +135,7 @@ impl SyncRequester {
}
/// Generic request sender
-fn send_request(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: SyncPacket, packet: Bytes) {
+fn send_request(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: SyncPacket, packet: Bytes) {
if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
if peer.asking != PeerAsking::Nothing {
warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking);


@@ -63,7 +63,7 @@ impl SyncSupplier {
/// Dispatch incoming requests and responses
// Take a u8 and not a SyncPacketId because this is the entry point
// to chain sync from the outside world.
-pub fn dispatch_packet(sync: &RwLock<ChainSync>, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
+pub fn dispatch_packet(sync: &RwLock<ChainSync>, io: &mut dyn SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
let rlp = Rlp::new(data);
if let Some(id) = SyncPacket::from_u8(packet_id) {
@@ -141,7 +141,7 @@ impl SyncSupplier {
}
/// Respond to GetBlockHeaders request
-fn return_block_headers(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+fn return_block_headers(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
let payload_soft_limit = io.payload_soft_limit();
// Packet layout:
// [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ]
@@ -222,7 +222,7 @@ impl SyncSupplier {
}
/// Respond to GetBlockBodies request
-fn return_block_bodies(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+fn return_block_bodies(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
let payload_soft_limit = io.payload_soft_limit();
let mut count = r.item_count().unwrap_or(0);
if count == 0 {
@@ -249,7 +249,7 @@ impl SyncSupplier {
}
/// Respond to GetNodeData request
-fn return_node_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+fn return_node_data(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
let payload_soft_limit = io.payload_soft_limit();
let mut count = r.item_count().unwrap_or(0);
trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count);
@@ -280,7 +280,7 @@ impl SyncSupplier {
Ok(Some((NodeDataPacket.id(), rlp)))
}
-fn return_receipts(io: &SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+fn return_receipts(io: &dyn SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult {
let payload_soft_limit = io.payload_soft_limit();
let mut count = rlp.item_count().unwrap_or(0);
trace!(target: "sync", "{} -> GetReceipts: {} entries", peer_id, count);
@@ -307,7 +307,7 @@ impl SyncSupplier {
}
/// Respond to GetSnapshotManifest request
-fn return_snapshot_manifest(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+fn return_snapshot_manifest(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
let count = r.item_count().unwrap_or(0);
trace!(target: "warp", "{} -> GetSnapshotManifest", peer_id);
if count != 0 {
@@ -330,7 +330,7 @@ impl SyncSupplier {
}
/// Respond to GetSnapshotData request
-fn return_snapshot_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+fn return_snapshot_data(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
let hash: H256 = r.val_at(0)?;
trace!(target: "warp", "{} -> GetSnapshotData {:?}", peer_id, hash);
let rlp = match io.snapshot_service().chunk(hash) {
@@ -348,8 +348,8 @@ impl SyncSupplier {
Ok(Some((SnapshotDataPacket.id(), rlp)))
}
-fn return_rlp<FRlp, FError>(io: &mut SyncIo, rlp: &Rlp, peer: PeerId, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError>
+fn return_rlp<FRlp, FError>(io: &mut dyn SyncIo, rlp: &Rlp, peer: PeerId, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError>
-where FRlp : Fn(&SyncIo, &Rlp, PeerId) -> RlpResponseResult,
+where FRlp : Fn(&dyn SyncIo, &Rlp, PeerId) -> RlpResponseResult,
FError : FnOnce(network::Error) -> String
{
let response = rlp_func(io, rlp, peer);
@@ -405,7 +405,7 @@ mod test {
let mut client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Nothing);
let blocks: Vec<_> = (0 .. 100)
-.map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).map(|b| b.into_inner()).unwrap()).collect();
+.map(|i| (&client as &dyn BlockChainClient).block(BlockId::Number(i as BlockNumber)).map(|b| b.into_inner()).unwrap()).collect();
let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect();
let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();


@@ -120,7 +120,7 @@ impl AncestorSearch {
}
}
-fn process_response<L>(self, ctx: &ResponseContext, client: &L) -> AncestorSearch
+fn process_response<L>(self, ctx: &dyn ResponseContext, client: &L) -> AncestorSearch
where L: AsLightClient
{
let client = client.as_light_client();
@@ -258,7 +258,7 @@ impl Deref for SyncStateWrapper {
struct ResponseCtx<'a> {
peer: PeerId,
req_id: ReqId,
-ctx: &'a BasicContext,
+ctx: &'a dyn BasicContext,
data: &'a [encoded::Header],
}
@@ -292,7 +292,7 @@ struct PendingReq {
impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
fn on_connect(
&self,
-ctx: &EventContext,
+ctx: &dyn EventContext,
status: &Status,
capabilities: &Capabilities
) -> PeerStatus {
@@ -319,7 +319,7 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
}
}
-fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) {
+fn on_disconnect(&self, ctx: &dyn EventContext, unfulfilled: &[ReqId]) {
let peer_id = ctx.peer();
let peer = match self.peers.write().remove(&peer_id).map(|p| p.into_inner()) {
@@ -370,7 +370,7 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
self.maintain_sync(ctx.as_basic());
}
-fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) {
+fn on_announcement(&self, ctx: &dyn EventContext, announcement: &Announcement) {
let (last_td, chain_info) = {
let peers = self.peers.read();
match peers.get(&ctx.peer()) {
@@ -406,7 +406,7 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
self.maintain_sync(ctx.as_basic());
}
-fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[request::Response]) {
+fn on_responses(&self, ctx: &dyn EventContext, req_id: ReqId, responses: &[request::Response]) {
let peer = ctx.peer();
if !self.peers.read().contains_key(&peer) {
return
@@ -448,7 +448,7 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
self.maintain_sync(ctx.as_basic());
}
-fn tick(&self, ctx: &BasicContext) {
+fn tick(&self, ctx: &dyn BasicContext) {
self.maintain_sync(ctx);
}
}
@@ -492,7 +492,7 @@ impl<L: AsLightClient> LightSync<L> {
}
// handles request dispatch, block import, state machine transitions, and timeouts.
-fn maintain_sync(&self, ctx: &BasicContext) {
+fn maintain_sync(&self, ctx: &dyn BasicContext) {
use ethcore::error::{Error as EthcoreError, ImportError};
const DRAIN_AMOUNT: usize = 128;
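The `Handler` callbacks above take their contexts as trait objects (`&dyn EventContext`, `&dyn BasicContext`) so a single handler implementation can be driven by whatever concrete context the network layer supplies. A reduced sketch with hypothetical names, not the crate's real API:

trait EventContext {
    fn peer(&self) -> usize;
}

trait Handler {
    // Object-safe callback: the concrete context type is erased at the call site.
    fn on_disconnect(&self, ctx: &dyn EventContext);
}

struct LoggingHandler;
impl Handler for LoggingHandler {
    fn on_disconnect(&self, ctx: &dyn EventContext) {
        println!("peer {} disconnected", ctx.peer());
    }
}

struct TestCtx;
impl EventContext for TestCtx {
    fn peer(&self) -> usize { 7 }
}

fn main() {
    LoggingHandler.on_disconnect(&TestCtx);
}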


@@ -53,7 +53,7 @@ impl Snapshot {
}
/// Sync the Snapshot completed chunks with the Snapshot Service
-pub fn initialize(&mut self, snapshot_service: &SnapshotService) {
+pub fn initialize(&mut self, snapshot_service: &dyn SnapshotService) {
if self.initialized {
return;
}


@@ -37,9 +37,9 @@ pub trait SyncIo {
/// Send a packet to a peer using specified protocol.
fn send(&mut self, peer_id: PeerId, packet_id: SyncPacket, data: Vec<u8>) -> Result<(), Error>;
/// Get the blockchain
-fn chain(&self) -> &BlockChainClient;
+fn chain(&self) -> &dyn BlockChainClient;
/// Get the snapshot service.
-fn snapshot_service(&self) -> &SnapshotService;
+fn snapshot_service(&self) -> &dyn SnapshotService;
/// Returns peer version identifier
fn peer_version(&self, peer_id: PeerId) -> ClientVersion {
ClientVersion::from(peer_id.to_string())
@@ -64,17 +64,17 @@ pub trait SyncIo {
/// Wraps `NetworkContext` and the blockchain client
pub struct NetSyncIo<'s> {
-network: &'s NetworkContext,
+network: &'s dyn NetworkContext,
-chain: &'s BlockChainClient,
+chain: &'s dyn BlockChainClient,
-snapshot_service: &'s SnapshotService,
+snapshot_service: &'s dyn SnapshotService,
chain_overlay: &'s RwLock<HashMap<BlockNumber, Bytes>>,
}
impl<'s> NetSyncIo<'s> {
/// Creates a new instance from the `NetworkContext` and the blockchain client reference.
-pub fn new(network: &'s NetworkContext,
+pub fn new(network: &'s dyn NetworkContext,
-chain: &'s BlockChainClient,
+chain: &'s dyn BlockChainClient,
-snapshot_service: &'s SnapshotService,
+snapshot_service: &'s dyn SnapshotService,
chain_overlay: &'s RwLock<HashMap<BlockNumber, Bytes>>) -> NetSyncIo<'s> {
NetSyncIo {
network: network,
@@ -102,7 +102,7 @@ impl<'s> SyncIo for NetSyncIo<'s> {
self.network.send_protocol(packet_id.protocol(), peer_id, packet_id.id(), data)
}
-fn chain(&self) -> &BlockChainClient {
+fn chain(&self) -> &dyn BlockChainClient {
self.chain
}
@@ -110,7 +110,7 @@ impl<'s> SyncIo for NetSyncIo<'s> {
self.chain_overlay
}
-fn snapshot_service(&self) -> &SnapshotService {
+fn snapshot_service(&self) -> &dyn SnapshotService {
self.snapshot_service
}
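`NetSyncIo` shows the same rewrite on borrowed struct fields: each `&'s Trait` becomes `&'s dyn Trait`, with the lifetime tying the borrowed trait objects to the wrapper. A compressed sketch of that shape (the `Chain` trait is a stand-in, not the real `BlockChainClient`):

trait Chain {
    fn best_block(&self) -> u64;
}

// Was `chain: &'s Chain` before the edition fix.
struct Io<'s> {
    chain: &'s dyn Chain,
}

impl<'s> Io<'s> {
    fn new(chain: &'s dyn Chain) -> Io<'s> {
        Io { chain }
    }
    fn best_block(&self) -> u64 {
        self.chain.best_block()
    }
}

struct FixedChain(u64);
impl Chain for FixedChain {
    fn best_block(&self) -> u64 { self.0 }
}

fn main() {
    let chain = FixedChain(42);
    assert_eq!(Io::new(&chain).best_block(), 42);
}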


@@ -46,8 +46,8 @@ fn authority_round() {
let chain_id = Spec::new_test_round().chain_id();
let mut net = TestNet::with_spec(2, SyncConfig::default(), Spec::new_test_round);
-let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(0).chain.clone()));
+let io_handler0: Arc<dyn IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(0).chain.clone()));
-let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(1).chain.clone()));
+let io_handler1: Arc<dyn IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(1).chain.clone()));
// Push transaction to both clients. Only one of them gets lucky to produce a block.
net.peer(0).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s0.clone())));
net.peer(1).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s1.clone())));


@@ -114,7 +114,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
Ok(())
}
-fn chain(&self) -> &BlockChainClient {
+fn chain(&self) -> &dyn BlockChainClient {
&*self.chain
}
@@ -126,7 +126,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
ClientVersion::from(client_id)
}
-fn snapshot_service(&self) -> &SnapshotService {
+fn snapshot_service(&self) -> &dyn SnapshotService {
self.snapshot_service
}


@@ -49,8 +49,8 @@ fn send_private_transaction() {
let mut net = TestNet::with_spec(2, SyncConfig::default(), seal_spec);
let client0 = net.peer(0).chain.clone();
let client1 = net.peer(1).chain.clone();
-let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(0).chain.clone()));
+let io_handler0: Arc<dyn IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(0).chain.clone()));
-let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(1).chain.clone()));
+let io_handler1: Arc<dyn IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler::new(net.peer(1).chain.clone()));
net.peer(0).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s0.clone())));
net.peer(1).miner.set_author(miner::Author::Sealer(engines::signer::from_keypair(s1.clone())));


@@ -44,7 +44,7 @@ extern crate unexpected;
#[macro_use]
extern crate rlp_derive;
extern crate parity_util_mem;
extern crate parity_util_mem as mem;
extern crate parity_util_mem as malloc_size_of;
#[cfg(test)]


@@ -41,6 +41,7 @@ pub enum RestorationStatus {
/// Number of block chunks completed.
block_chunks_done: u32,
},
+/// Finalizing restoration
Finalizing,
/// Failed restoration.
Failed,


@@ -119,5 +119,5 @@ impl fmt::Display for Error {
pub type Result<T> = ::std::result::Result<T, Error>;
pub type TrapResult<T, Call, Create> = ::std::result::Result<Result<T>, TrapError<Call, Create>>;
-pub type ExecTrapResult<T> = TrapResult<T, Box<ResumeCall>, Box<ResumeCreate>>;
+pub type ExecTrapResult<T> = TrapResult<T, Box<dyn ResumeCall>, Box<dyn ResumeCreate>>;
-pub type ExecTrapError = TrapError<Box<ResumeCall>, Box<ResumeCreate>>;
+pub type ExecTrapError = TrapError<Box<dyn ResumeCall>, Box<dyn ResumeCreate>>;


@@ -46,17 +46,17 @@ pub trait Exec: Send {
/// This function should be used to execute transaction.
/// It returns either an error, a known amount of gas left, or parameters to be used
/// to compute the final gas left.
-fn exec(self: Box<Self>, ext: &mut Ext) -> ExecTrapResult<GasLeft>;
+fn exec(self: Box<Self>, ext: &mut dyn Ext) -> ExecTrapResult<GasLeft>;
}
/// Resume call interface
pub trait ResumeCall: Send {
/// Resume an execution for call, returns back the Vm interface.
-fn resume_call(self: Box<Self>, result: MessageCallResult) -> Box<Exec>;
+fn resume_call(self: Box<Self>, result: MessageCallResult) -> Box<dyn Exec>;
}
/// Resume create interface
pub trait ResumeCreate: Send {
/// Resume an execution from create, returns back the Vm interface.
-fn resume_create(self: Box<Self>, result: ContractCreateResult) -> Box<Exec>;
+fn resume_create(self: Box<Self>, result: ContractCreateResult) -> Box<dyn Exec>;
}
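The `Exec`/`ResumeCall`/`ResumeCreate` trio passes boxed trait objects by value (`self: Box<Self>`), so a suspended VM can be handed back and resumed without naming its concrete type. A hypothetical, heavily simplified sketch of that handshake:

trait Exec {
    // Consumes the boxed VM; on a trap it yields a resumable continuation.
    fn exec(self: Box<Self>) -> Result<u64, Box<dyn Resume>>;
}

trait Resume {
    // Consumes the suspended state and hands back a boxed `dyn Exec`.
    fn resume(self: Box<Self>, input: u64) -> Box<dyn Exec>;
}

struct Done(u64);
impl Exec for Done {
    fn exec(self: Box<Self>) -> Result<u64, Box<dyn Resume>> {
        Ok(self.0)
    }
}

struct NeedsInput;
impl Resume for NeedsInput {
    fn resume(self: Box<Self>, input: u64) -> Box<dyn Exec> {
        Box::new(Done(input))
    }
}

fn main() {
    let resumed: Box<dyn Exec> = Box::new(NeedsInput).resume(9);
    assert_eq!(resumed.exec().ok(), Some(9));
}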


@@ -96,7 +96,7 @@ enum ExecutionOutcome {
}
impl WasmInterpreter {
-pub fn run(self: Box<WasmInterpreter>, ext: &mut vm::Ext) -> vm::Result<GasLeft> {
+pub fn run(self: Box<WasmInterpreter>, ext: &mut dyn vm::Ext) -> vm::Result<GasLeft> {
let (module, data) = parser::payload(&self.params, ext.schedule().wasm())?;
let loaded_module = wasmi::Module::from_parity_wasm_module(module).map_err(Error::Interpreter)?;
@@ -196,7 +196,7 @@ impl WasmInterpreter {
}
impl vm::Exec for WasmInterpreter {
-fn exec(self: Box<WasmInterpreter>, ext: &mut vm::Ext) -> vm::ExecTrapResult<GasLeft> {
+fn exec(self: Box<WasmInterpreter>, ext: &mut dyn vm::Ext) -> vm::ExecTrapResult<GasLeft> {
Ok(self.run(ext))
}
}


@@ -32,7 +32,7 @@ pub struct RuntimeContext {
pub struct Runtime<'a> {
gas_counter: u64,
gas_limit: u64,
-ext: &'a mut vm::Ext,
+ext: &'a mut dyn vm::Ext,
context: RuntimeContext,
memory: MemoryRef,
args: Vec<u8>,
@@ -147,7 +147,7 @@ impl<'a> Runtime<'a> {
/// New runtime for wasm contract with specified params
pub fn with_params(
-ext: &mut vm::Ext,
+ext: &mut dyn vm::Ext,
memory: MemoryRef,
gas_limit: u64,
args: Vec<u8>,
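Besides the `dyn` rewrites, the wasm test hunks below drop a needless `mut`: `exec` consumes the interpreter by value (`self: Box<Self>` in the `Exec` trait), so the binding is never mutated through and `cargo fix` strips the `unused_mut` warning. A sketch with a hypothetical `Interp` type:

struct Interp;

impl Interp {
    // Takes `self` by value, so callers never need a mutable binding.
    fn exec(self) -> u64 { 21 }
}

fn main() {
    // `let mut interp = Interp;` would warn under `unused_mut`;
    // plain `let` is what the fix leaves behind.
    let interp = Interp;
    assert_eq!(interp.exec(), 21);
}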


@@ -48,7 +48,7 @@ macro_rules! reqrep_test {
fake_ext.info = $info;
fake_ext.blockhashes = $block_hashes;
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
interpreter.exec(&mut fake_ext).ok().unwrap()
.map(|result| match result {
GasLeft::Known(_) => { panic!("Test is expected to return payload to check"); },
@@ -83,7 +83,7 @@ fn empty() {
let mut ext = FakeExt::new().with_wasm();
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap()
};
@@ -112,7 +112,7 @@ fn logger() {
let mut ext = FakeExt::new().with_wasm();
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap()
};
@@ -164,7 +164,7 @@ fn identity() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("Identity contract should return payload"); },
@@ -199,7 +199,7 @@ fn dispersion() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("Dispersion routine should return payload"); },
@@ -227,7 +227,7 @@ fn suicide_not() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("Suicidal contract should return payload when had not actualy killed himself"); },
@@ -260,7 +260,7 @@ fn suicide() {
let mut ext = FakeExt::new().with_wasm();
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(gas) => gas,
@@ -288,7 +288,7 @@ fn create() {
ext.schedule.wasm.as_mut().unwrap().have_create2 = true;
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => {
@@ -353,7 +353,7 @@ fn call_msg() {
ext.balances.insert(receiver.clone(), U256::from(10000000000u64));
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(gas_left) => gas_left,
@@ -401,7 +401,7 @@ fn call_msg_gasleft() {
ext.balances.insert(receiver.clone(), U256::from(10000000000u64));
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(gas_left) => gas_left,
@@ -444,7 +444,7 @@ fn call_code() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("Call test should return payload"); },
@@ -492,7 +492,7 @@ fn call_static() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("Static call test should return payload"); },
@@ -533,7 +533,7 @@ fn realloc() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("Realloc should return payload"); },
@@ -555,7 +555,7 @@ fn alloc() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("alloc test should return payload"); },
@@ -583,7 +583,7 @@ fn storage_read() {
ext.store.insert(hash, address.into());
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("storage_read should return payload"); },
@@ -609,7 +609,7 @@ fn keccak() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("keccak should return payload"); },
@@ -759,7 +759,7 @@ fn storage_metering() {
]);
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap()
};
@@ -778,7 +778,7 @@ fn storage_metering() {
]);
let gas_left = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap()
};
@@ -929,7 +929,7 @@ fn embedded_keccak() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("keccak should return payload"); },
@@ -960,7 +960,7 @@ fn events() {
let mut ext = FakeExt::new().with_wasm();
let (gas_left, result) = {
-let mut interpreter = wasm_interpreter(params);
+let interpreter = wasm_interpreter(params);
let result = interpreter.exec(&mut ext).ok().unwrap().expect("Interpreter to execute without any errors");
match result {
GasLeft::Known(_) => { panic!("events should return payload"); },


@@ -40,14 +40,14 @@ use traits::JournalDB;
/// that the states of any block the node has ever processed will be accessible.
pub struct ArchiveDB {
overlay: super::MemoryDB,
-backing: Arc<KeyValueDB>,
+backing: Arc<dyn KeyValueDB>,
latest_era: Option<u64>,
column: Option<u32>,
}
impl ArchiveDB {
/// Create a new instance from a key-value db.
-pub fn new(backing: Arc<KeyValueDB>, column: Option<u32>) -> ArchiveDB {
+pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> ArchiveDB {
let latest_era = backing.get(column, &LATEST_ERA_KEY)
.expect("Low-level database error.")
.map(|val| decode::<u64>(&val).expect("decoding db value failed"));
@@ -114,7 +114,7 @@ impl ::traits::KeyedHashDB for ArchiveDB {
impl JournalDB for ArchiveDB {
-fn boxed_clone(&self) -> Box<JournalDB> {
+fn boxed_clone(&self) -> Box<dyn JournalDB> {
Box::new(ArchiveDB {
overlay: self.overlay.clone(),
backing: self.backing.clone(),
@@ -193,7 +193,7 @@ impl JournalDB for ArchiveDB {
fn is_pruned(&self) -> bool { false }
-fn backing(&self) -> &Arc<KeyValueDB> {
+fn backing(&self) -> &Arc<dyn KeyValueDB> {
&self.backing
}


@@ -26,46 +26,46 @@ use kvdb::DBValue;
use crate::{KeyedHashDB, AsKeyedHashDB};
impl AsHashDB<KeccakHasher, DBValue> for ArchiveDB {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
}
impl AsHashDB<KeccakHasher, DBValue> for EarlyMergeDB {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
}
impl AsHashDB<KeccakHasher, DBValue> for OverlayRecentDB {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
}
impl AsHashDB<KeccakHasher, DBValue> for RefCountedDB {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
}
impl AsHashDB<KeccakHasher, DBValue> for OverlayDB {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
}
impl AsKeyedHashDB for ArchiveDB {
-fn as_keyed_hash_db(&self) -> &KeyedHashDB { self }
+fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self }
}
impl AsKeyedHashDB for EarlyMergeDB {
-fn as_keyed_hash_db(&self) -> &KeyedHashDB { self }
+fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self }
}
impl AsKeyedHashDB for OverlayRecentDB {
-fn as_keyed_hash_db(&self) -> &KeyedHashDB { self }
+fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self }
}
impl AsKeyedHashDB for RefCountedDB {
-fn as_keyed_hash_db(&self) -> &KeyedHashDB { self }
+fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self }
}
impl AsKeyedHashDB for OverlayDB {
-fn as_keyed_hash_db(&self) -> &KeyedHashDB { self }
+fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self }
}
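The wall of near-identical impls above is the standard upcasting workaround: the compiler (at this edition) does not coerce one trait object into another automatically, so each concrete type returns `self` behind the target trait object explicitly. A reduced sketch of the pattern with stand-in traits:

trait HashStore {
    fn contains(&self, key: u8) -> bool;
}

trait AsHashStore {
    // Manual upcast: implementors simply return `self` as the trait object.
    fn as_hash_store(&self) -> &dyn HashStore;
}

struct MemDb;
impl HashStore for MemDb {
    fn contains(&self, _key: u8) -> bool { false }
}
impl AsHashStore for MemDb {
    fn as_hash_store(&self) -> &dyn HashStore { self }
}

fn main() {
    let db = MemDb;
    let store: &dyn HashStore = db.as_hash_store();
    assert!(!store.contains(1));
}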


@@ -103,7 +103,7 @@ enum RemoveFrom {
/// TODO: `store_reclaim_period`
pub struct EarlyMergeDB {
overlay: super::MemoryDB,
-backing: Arc<KeyValueDB>,
+backing: Arc<dyn KeyValueDB>,
refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>,
latest_era: Option<u64>,
column: Option<u32>,
@@ -111,7 +111,7 @@ pub struct EarlyMergeDB {
impl EarlyMergeDB {
/// Create a new instance from file
-pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> EarlyMergeDB {
+pub fn new(backing: Arc<dyn KeyValueDB>, col: Option<u32>) -> EarlyMergeDB {
let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, col);
let refs = Some(Arc::new(RwLock::new(refs)));
EarlyMergeDB {
@@ -132,11 +132,11 @@ impl EarlyMergeDB {
// The next three are valid only as long as there is an insert operation of `key` in the journal.
fn set_already_in(batch: &mut DBTransaction, col: Option<u32>, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); }
fn reset_already_in(batch: &mut DBTransaction, col: Option<u32>, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); }
-fn is_already_in(backing: &KeyValueDB, col: Option<u32>, key: &H256) -> bool {
+fn is_already_in(backing: &dyn KeyValueDB, col: Option<u32>, key: &H256) -> bool {
backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
}
-fn insert_keys(inserts: &[(H256, DBValue)], backing: &KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction) {
+fn insert_keys(inserts: &[(H256, DBValue)], backing: &dyn KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction) {
for &(ref h, ref d) in inserts {
match refs.entry(*h) {
Entry::Occupied(mut entry) => {
@@ -169,7 +169,7 @@ impl EarlyMergeDB {
}
}
-fn replay_keys(inserts: &[H256], backing: &KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>) {
+fn replay_keys(inserts: &[H256], backing: &dyn KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>) {
trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs);
for h in inserts {
match refs.entry(*h) {
@@ -259,7 +259,7 @@ impl EarlyMergeDB {
.expect("Low-level database error. Some issue with your hard disk?")
}
-fn read_refs(db: &KeyValueDB, col: Option<u32>) -> (Option<u64>, HashMap<H256, RefInfo>) {
+fn read_refs(db: &dyn KeyValueDB, col: Option<u32>) -> (Option<u64>, HashMap<H256, RefInfo>) {
let mut refs = HashMap::new();
let mut latest_era = None;
if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") {
@@ -332,7 +332,7 @@ impl ::traits::KeyedHashDB for EarlyMergeDB {
}
impl JournalDB for EarlyMergeDB {
-fn boxed_clone(&self) -> Box<JournalDB> {
+fn boxed_clone(&self) -> Box<dyn JournalDB> {
Box::new(EarlyMergeDB {
overlay: self.overlay.clone(),
backing: self.backing.clone(),
@@ -346,7 +346,7 @@ impl JournalDB for EarlyMergeDB {
self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none()
}
-fn backing(&self) -> &Arc<KeyValueDB> {
+fn backing(&self) -> &Arc<dyn KeyValueDB> {
&self.backing
}


@@ -152,7 +152,7 @@ impl fmt::Display for Algorithm {
}
/// Create a new `JournalDB` trait object over a generic key-value database.
-pub fn new(backing: Arc<::kvdb::KeyValueDB>, algorithm: Algorithm, col: Option<u32>) -> Box<JournalDB> {
+pub fn new(backing: Arc<dyn (::kvdb::KeyValueDB)>, algorithm: Algorithm, col: Option<u32>) -> Box<dyn JournalDB> {
match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)),
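`new` here is a run-time factory: the pruning algorithm is picked from configuration, so the concrete database type cannot be named statically and the function returns a boxed trait object, now spelled `Box<dyn JournalDB>`. The same shape with placeholder types:

trait Journal {
    fn name(&self) -> &'static str;
}

struct Archive;
impl Journal for Archive {
    fn name(&self) -> &'static str { "archive" }
}

struct OverlayRecent;
impl Journal for OverlayRecent {
    fn name(&self) -> &'static str { "overlayrecent" }
}

enum Algorithm { Archive, OverlayRecent }

// Erases the concrete DB behind `dyn Journal`, chosen at run time.
fn new(algorithm: Algorithm) -> Box<dyn Journal> {
    match algorithm {
        Algorithm::Archive => Box::new(Archive),
        Algorithm::OverlayRecent => Box::new(OverlayRecent),
    }
}

fn main() {
    assert_eq!(new(Algorithm::Archive).name(), "archive");
}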


@@ -39,7 +39,7 @@ use super::{error_negatively_reference_hash};
#[derive(Clone)]
pub struct OverlayDB {
overlay: super::MemoryDB,
-backing: Arc<KeyValueDB>,
+backing: Arc<dyn KeyValueDB>,
column: Option<u32>,
}
@@ -78,7 +78,7 @@ impl Decodable for Payload {
impl OverlayDB {
/// Create a new instance of OverlayDB given a `backing` database.
-pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> OverlayDB {
+pub fn new(backing: Arc<dyn KeyValueDB>, col: Option<u32>) -> OverlayDB {
OverlayDB{ overlay: ::new_memory_db(), backing: backing, column: col }
}


@@ -66,7 +66,7 @@ use util::DatabaseKey;
pub struct OverlayRecentDB {
transaction_overlay: super::MemoryDB,
-backing: Arc<KeyValueDB>,
+backing: Arc<dyn KeyValueDB>,
journal_overlay: Arc<RwLock<JournalOverlay>>,
column: Option<u32>,
}
@@ -147,7 +147,7 @@ impl Clone for OverlayRecentDB {
impl OverlayRecentDB {
/// Create a new instance.
-pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> OverlayRecentDB {
+pub fn new(backing: Arc<dyn KeyValueDB>, col: Option<u32>) -> OverlayRecentDB {
let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col)));
OverlayRecentDB {
transaction_overlay: ::new_memory_db(),
@@ -174,7 +174,7 @@ impl OverlayRecentDB {
.expect("Low-level database error. Some issue with your hard disk?")
}
-fn read_overlay(db: &KeyValueDB, col: Option<u32>) -> JournalOverlay {
+fn read_overlay(db: &dyn KeyValueDB, col: Option<u32>) -> JournalOverlay {
let mut journal = HashMap::new();
let mut overlay = ::new_memory_db();
let mut count = 0;
@@ -260,7 +260,7 @@ impl ::traits::KeyedHashDB for OverlayRecentDB {
impl JournalDB for OverlayRecentDB {
-fn boxed_clone(&self) -> Box<JournalDB> {
+fn boxed_clone(&self) -> Box<dyn JournalDB> {
Box::new(self.clone())
}
@@ -285,7 +285,7 @@ impl JournalDB for OverlayRecentDB {
self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none()
}
-fn backing(&self) -> &Arc<KeyValueDB> {
+fn backing(&self) -> &Arc<dyn KeyValueDB> {
&self.backing
}


@@ -54,7 +54,7 @@ use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef};
// TODO: store last_era, reclaim_period.
pub struct RefCountedDB {
forward: OverlayDB,
-backing: Arc<KeyValueDB>,
+backing: Arc<dyn KeyValueDB>,
latest_era: Option<u64>,
inserts: Vec<H256>,
removes: Vec<H256>,
@@ -63,7 +63,7 @@ pub struct RefCountedDB {
impl RefCountedDB {
/// Create a new instance given a `backing` database.
-pub fn new(backing: Arc<KeyValueDB>, column: Option<u32>) -> RefCountedDB {
+pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB {
let latest_era = backing.get(column, &LATEST_ERA_KEY)
.expect("Low-level database error.")
.map(|v| decode::<u64>(&v).expect("decoding db value failed"));
@@ -92,7 +92,7 @@ impl ::traits::KeyedHashDB for RefCountedDB {
}
impl JournalDB for RefCountedDB {
-fn boxed_clone(&self) -> Box<JournalDB> {
+fn boxed_clone(&self) -> Box<dyn JournalDB> {
Box::new(RefCountedDB {
forward: self.forward.clone(),
backing: self.backing.clone(),
@@ -112,7 +112,7 @@ impl JournalDB for RefCountedDB {
self.latest_era.is_none()
}
-fn backing(&self) -> &Arc<KeyValueDB> {
+fn backing(&self) -> &Arc<dyn KeyValueDB> {
&self.backing
}


@@ -36,7 +36,7 @@ pub trait KeyedHashDB: HashDB<KeccakHasher, DBValue> {
/// Upcast to `KeyedHashDB`
pub trait AsKeyedHashDB: AsHashDB<KeccakHasher, DBValue> {
/// Perform upcast to KeyedHashDB.
-fn as_keyed_hash_db(&self) -> &KeyedHashDB;
+fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB;
}
/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
@@ -44,7 +44,7 @@ pub trait AsKeyedHashDB: AsHashDB<KeccakHasher, DBValue> {
pub trait JournalDB: KeyedHashDB {
/// Return a copy of ourself, in a box.
-fn boxed_clone(&self) -> Box<JournalDB>;
+fn boxed_clone(&self) -> Box<dyn JournalDB>;
/// Returns heap memory size used
fn mem_used(&self) -> usize;
@@ -86,7 +86,7 @@ pub trait JournalDB: KeyedHashDB {
fn is_pruned(&self) -> bool { true }
/// Get backing database.
-fn backing(&self) -> &Arc<kvdb::KeyValueDB>;
+fn backing(&self) -> &Arc<dyn kvdb::KeyValueDB>;
/// Clear internal structures. This should be called after changes have been written
/// to the backing storage
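Every journal implementation in this commit stores its backing store as `Arc<dyn KeyValueDB>`: the trait object erases the concrete store while the `Arc` lets `boxed_clone` share one database across clones. A toy sketch of that ownership pattern (the `KeyValue` trait is a stand-in for `kvdb::KeyValueDB`):

use std::sync::Arc;

trait KeyValue: Send + Sync {
    fn get(&self, key: &str) -> Option<String>;
}

struct Fixed;
impl KeyValue for Fixed {
    fn get(&self, key: &str) -> Option<String> {
        if key == "era" { Some("7".to_string()) } else { None }
    }
}

struct Journal {
    backing: Arc<dyn KeyValue>,
}

impl Journal {
    // Mirrors JournalDB::boxed_clone: clones share the same backing store.
    fn boxed_clone(&self) -> Journal {
        Journal { backing: self.backing.clone() }
    }
}

fn main() {
    let db = Journal { backing: Arc::new(Fixed) };
    let copy = db.boxed_clone();
    assert_eq!(copy.backing.get("era").as_deref(), Some("7"));
}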