fix compile warnings (#10993)
* fix warnings
* fix: failing build, use `spec` as dev-dependency
parent 505e284932
commit dab2a6bd4b
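Most of the warnings fixed below are Rust 2018 `bare_trait_objects` lints: trait objects written as `Box<Trait>`, `Arc<Trait>`, `&Trait`, or `Weak<Trait>` now want an explicit `dyn`. A minimal sketch of the pattern follows; the trait and type names (`Greeter`, `English`, `greet_all`) are illustrative stand-ins, not types from this codebase:

```rust
// Stand-in trait; the real change touches traits such as
// `KeyDirectory`, `SyncProvider`, `ChainNotify`, and friends.
trait Greeter {
    fn greet(&self) -> String;
}

struct English;

impl Greeter for English {
    fn greet(&self) -> String { "hello".to_string() }
}

// Old style: `Box<Greeter>` still compiles but triggers the
// `bare_trait_objects` warning on Rust 2018-era compilers.
// fn greet_all(g: Box<Greeter>) -> String { g.greet() }

// Warning-free form: the trait object is spelled out with `dyn`,
// which is what this commit does throughout the tree.
fn greet_all(g: Box<dyn Greeter>) -> String {
    g.greet()
}

fn main() {
    assert_eq!(greet_all(Box::new(English)), "hello");
}
```

The second part of the change addresses the failing build mentioned in the message: the RPC crate's `spec` dependency moves from `[dependencies]` to `[dev-dependencies]`, and its `extern crate spec;` declaration is gated behind `#[cfg(test)]`, since it is only needed by tests.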
@@ -284,7 +284,7 @@ impl<T> KeyDirectory for DiskDirectory<T> where T: KeyFileManager {

 fn path(&self) -> Option<&PathBuf> { Some(&self.path) }

-fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> {
+fn as_vault_provider(&self) -> Option<&dyn VaultKeyDirectoryProvider> {
 Some(self)
 }

@@ -294,12 +294,12 @@ impl<T> KeyDirectory for DiskDirectory<T> where T: KeyFileManager {
 }

 impl<T> VaultKeyDirectoryProvider for DiskDirectory<T> where T: KeyFileManager {
-fn create(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
+fn create(&self, name: &str, key: VaultKey) -> Result<Box<dyn VaultKeyDirectory>, Error> {
 let vault_dir = VaultDiskDirectory::create(&self.path, name, key)?;
 Ok(Box::new(vault_dir))
 }

-fn open(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
+fn open(&self, name: &str, key: VaultKey) -> Result<Box<dyn VaultKeyDirectory>, Error> {
 let vault_dir = VaultDiskDirectory::at(&self.path, name, key)?;
 Ok(Box::new(vault_dir))
 }
@@ -57,7 +57,7 @@ pub trait KeyDirectory: Send + Sync {
 /// Get directory filesystem path, if available
 fn path(&self) -> Option<&PathBuf> { None }
 /// Return vault provider, if available
-fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> { None }
+fn as_vault_provider(&self) -> Option<&dyn VaultKeyDirectoryProvider> { None }
 /// Unique representation of directory account collection
 fn unique_repr(&self) -> Result<u64, Error>;
 }
@@ -65,9 +65,9 @@ pub trait KeyDirectory: Send + Sync {
 /// Vaults provider
 pub trait VaultKeyDirectoryProvider {
 /// Create new vault with given key
-fn create(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error>;
+fn create(&self, name: &str, key: VaultKey) -> Result<Box<dyn VaultKeyDirectory>, Error>;
 /// Open existing vault with given key
-fn open(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error>;
+fn open(&self, name: &str, key: VaultKey) -> Result<Box<dyn VaultKeyDirectory>, Error>;
 /// List all vaults
 fn list_vaults(&self) -> Result<Vec<String>, Error>;
 /// Get vault meta
@@ -77,7 +77,7 @@ pub trait VaultKeyDirectoryProvider {
 /// Vault directory
 pub trait VaultKeyDirectory: KeyDirectory {
 /// Cast to `KeyDirectory`
-fn as_key_directory(&self) -> &KeyDirectory;
+fn as_key_directory(&self) -> &dyn KeyDirectory;
 /// Vault name
 fn name(&self) -> &str;
 /// Get vault key
@@ -119,7 +119,7 @@ impl VaultDiskDirectory {
 }

 impl VaultKeyDirectory for VaultDiskDirectory {
-fn as_key_directory(&self) -> &KeyDirectory {
+fn as_key_directory(&self) -> &dyn KeyDirectory {
 self
 }

@@ -36,12 +36,12 @@ pub struct EthStore {

 impl EthStore {
 /// Open a new accounts store with given key directory backend.
-pub fn open(directory: Box<KeyDirectory>) -> Result<Self, Error> {
+pub fn open(directory: Box<dyn KeyDirectory>) -> Result<Self, Error> {
 Self::open_with_iterations(directory, KEY_ITERATIONS as u32)
 }

 /// Open a new account store with given key directory backend and custom number of iterations.
-pub fn open_with_iterations(directory: Box<KeyDirectory>, iterations: u32) -> Result<Self, Error> {
+pub fn open_with_iterations(directory: Box<dyn KeyDirectory>, iterations: u32) -> Result<Self, Error> {
 Ok(EthStore {
 store: EthMultiStore::open_with_iterations(directory, iterations)?,
 })
@@ -184,7 +184,7 @@ impl SecretStore for EthStore {
 Ok(account.check_password(password))
 }

-fn copy_account(&self, new_store: &SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &Password, new_password: &Password) -> Result<(), Error> {
+fn copy_account(&self, new_store: &dyn SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &Password, new_password: &Password) -> Result<(), Error> {
 let account = self.get(account)?;
 let secret = account.crypto.secret(password)?;
 new_store.insert_account(new_vault, secret, new_password)?;
@@ -256,11 +256,11 @@ impl SecretStore for EthStore {

 /// Similar to `EthStore` but may store many accounts (with different passwords) for the same `Address`
 pub struct EthMultiStore {
-dir: Box<KeyDirectory>,
+dir: Box<dyn KeyDirectory>,
 iterations: u32,
 // order lock: cache, then vaults
 cache: RwLock<BTreeMap<StoreAccountRef, Vec<SafeAccount>>>,
-vaults: Mutex<HashMap<String, Box<VaultKeyDirectory>>>,
+vaults: Mutex<HashMap<String, Box<dyn VaultKeyDirectory>>>,
 timestamp: Mutex<Timestamp>,
 }

@@ -272,12 +272,12 @@ struct Timestamp {

 impl EthMultiStore {
 /// Open new multi-accounts store with given key directory backend.
-pub fn open(directory: Box<KeyDirectory>) -> Result<Self, Error> {
+pub fn open(directory: Box<dyn KeyDirectory>) -> Result<Self, Error> {
 Self::open_with_iterations(directory, KEY_ITERATIONS as u32)
 }

 /// Open new multi-accounts store with given key directory backend and custom number of iterations for new keys.
-pub fn open_with_iterations(directory: Box<KeyDirectory>, iterations: u32) -> Result<Self, Error> {
+pub fn open_with_iterations(directory: Box<dyn KeyDirectory>, iterations: u32) -> Result<Self, Error> {
 let store = EthMultiStore {
 dir: directory,
 vaults: Mutex::new(HashMap::new()),
@@ -24,7 +24,7 @@ use dir;
 use Error;

 /// Import an account from a file.
-pub fn import_account(path: &Path, dst: &KeyDirectory) -> Result<Address, Error> {
+pub fn import_account(path: &Path, dst: &dyn KeyDirectory) -> Result<Address, Error> {
 let key_manager = DiskKeyFileManager::default();
 let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::<HashSet<_>>();
 let filename = path.file_name().and_then(|n| n.to_str()).map(|f| f.to_owned());
@@ -40,7 +40,7 @@ pub fn import_account(path: &Path, dst: &KeyDirectory) -> Result<Address, Error>
 }

 /// Import all accounts from one directory to the other.
-pub fn import_accounts(src: &KeyDirectory, dst: &KeyDirectory) -> Result<Vec<Address>, Error> {
+pub fn import_accounts(src: &dyn KeyDirectory, dst: &dyn KeyDirectory) -> Result<Vec<Address>, Error> {
 let accounts = src.load()?;
 let existing_accounts = dst.load()?.into_iter()
 .map(|a| a.address)
@@ -64,7 +64,7 @@ pub fn read_geth_accounts(testnet: bool) -> Vec<Address> {
 }

 /// Import specific `desired` accounts from the Geth keystore into `dst`.
-pub fn import_geth_accounts(dst: &KeyDirectory, desired: HashSet<Address>, testnet: bool) -> Result<Vec<Address>, Error> {
+pub fn import_geth_accounts(dst: &dyn KeyDirectory, desired: HashSet<Address>, testnet: bool) -> Result<Vec<Address>, Error> {
 let src = RootDiskDirectory::at(dir::geth(testnet));
 let accounts = src.load()?;
 let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::<HashSet<_>>();
@@ -118,7 +118,7 @@ pub trait SecretStore: SimpleSecretStore {
 /// Imports existing JSON wallet
 fn import_wallet(&self, vault: SecretVaultRef, json: &[u8], password: &Password, gen_id: bool) -> Result<StoreAccountRef, Error>;
 /// Copies account between stores and vaults.
-fn copy_account(&self, new_store: &SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &Password, new_password: &Password) -> Result<(), Error>;
+fn copy_account(&self, new_store: &dyn SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &Password, new_password: &Password) -> Result<(), Error>;
 /// Checks if password matches given account.
 fn test_password(&self, account: &StoreAccountRef, password: &Password) -> Result<bool, Error>;

@@ -64,7 +64,7 @@ pub struct AccountProvider {
 /// Address book.
 address_book: RwLock<AddressBook>,
 /// Accounts on disk
-sstore: Box<SecretStore>,
+sstore: Box<dyn SecretStore>,
 /// Accounts unlocked with rolling tokens
 transient_sstore: EthMultiStore,
 /// When unlocking account permanently we additionally keep a raw secret in memory
@@ -80,7 +80,7 @@ fn transient_sstore() -> EthMultiStore {

 impl AccountProvider {
 /// Creates new account provider.
-pub fn new(sstore: Box<SecretStore>, settings: AccountProviderSettings) -> Self {
+pub fn new(sstore: Box<dyn SecretStore>, settings: AccountProviderSettings) -> Self {
 if let Ok(accounts) = sstore.accounts() {
 for account in accounts.into_iter().filter(|a| settings.blacklisted_accounts.contains(&a.address)) {
 warn!("Local Account {} has a blacklisted (known to be weak) address and will be ignored",
@@ -36,7 +36,7 @@ extern crate log;
 extern crate matches;

 /// Boxed future response.
-pub type BoxFuture<T, E> = Box<futures::Future<Item=T, Error=E> + Send>;
+pub type BoxFuture<T, E> = Box<dyn futures::Future<Item=T, Error=E> + Send>;

 #[cfg(test)]
 mod tests {
@@ -138,7 +138,7 @@ pub enum Error {
 }

 impl error::Error for Error {
-fn source(&self) -> Option<&(error::Error + 'static)> {
+fn source(&self) -> Option<&(dyn error::Error + 'static)> {
 match self {
 Error::Io(e) => Some(e),
 Error::Decoder(e) => Some(e),
@@ -63,7 +63,6 @@ extern crate transaction_pool as txpool;
 extern crate url;
 #[macro_use]
 extern crate log as ethlog;
-#[macro_use]
 extern crate ethabi_derive;
 #[macro_use]
 extern crate ethabi_contract;
@@ -205,17 +204,17 @@ impl Signer for KeyPairSigner {

 /// Manager of private transactions
 pub struct Provider {
-encryptor: Box<Encryptor>,
+encryptor: Box<dyn Encryptor>,
 validator_accounts: HashSet<Address>,
 signer_account: Option<Address>,
-notify: RwLock<Vec<Weak<ChainNotify>>>,
+notify: RwLock<Vec<Weak<dyn ChainNotify>>>,
 transactions_for_signing: RwLock<SigningStore>,
 transactions_for_verification: VerificationStore,
 client: Arc<Client>,
 miner: Arc<Miner>,
-accounts: Arc<Signer>,
+accounts: Arc<dyn Signer>,
 channel: IoChannel<ClientIoMessage>,
-keys_provider: Arc<KeyProvider>,
+keys_provider: Arc<dyn KeyProvider>,
 logging: Option<Logging>,
 use_offchain_storage: bool,
 state_storage: PrivateStateStorage,
@@ -234,12 +233,12 @@ impl Provider {
 pub fn new(
 client: Arc<Client>,
 miner: Arc<Miner>,
-accounts: Arc<Signer>,
-encryptor: Box<Encryptor>,
+accounts: Arc<dyn Signer>,
+encryptor: Box<dyn Encryptor>,
 config: ProviderConfig,
 channel: IoChannel<ClientIoMessage>,
-keys_provider: Arc<KeyProvider>,
-db: Arc<KeyValueDB>,
+keys_provider: Arc<dyn KeyProvider>,
+db: Arc<dyn KeyValueDB>,
 ) -> Self {
 keys_provider.update_acl_contract();
 Provider {
@@ -268,11 +267,11 @@ impl Provider {
 // TODO [ToDr] Don't use `ChainNotify` here!
 // Better to create a separate notification type for this.
 /// Adds an actor to be notified on certain events
-pub fn add_notify(&self, target: Arc<ChainNotify>) {
+pub fn add_notify(&self, target: Arc<dyn ChainNotify>) {
 self.notify.write().push(Arc::downgrade(&target));
 }

-fn notify<F>(&self, f: F) where F: Fn(&ChainNotify) {
+fn notify<F>(&self, f: F) where F: Fn(&dyn ChainNotify) {
 for np in self.notify.read().iter() {
 if let Some(n) = np.upgrade() {
 f(&*n);
@@ -186,13 +186,13 @@ impl LogsSerializer for FileLogsSerializer {
 /// Private transactions logging
 pub struct Logging {
 logs: RwLock<HashMap<H256, TransactionLog>>,
-logs_serializer: Arc<LogsSerializer>,
+logs_serializer: Arc<dyn LogsSerializer>,
 mono_time: MonoTime,
 }

 impl Logging {
 /// Creates the logging object
-pub fn new(logs_serializer: Arc<LogsSerializer>) -> Self {
+pub fn new(logs_serializer: Arc<dyn LogsSerializer>) -> Self {
 let mut logging = Logging {
 logs: RwLock::new(HashMap::new()),
 logs_serializer,
@@ -25,12 +25,12 @@ use error::Error;

 /// Wrapper around local db with private state for sync purposes
 pub struct PrivateStateDB {
-db: Arc<KeyValueDB>,
+db: Arc<dyn KeyValueDB>,
 }

 impl PrivateStateDB {
 /// Constructs the object
-pub fn new(db: Arc<KeyValueDB>) -> Self {
+pub fn new(db: Arc<dyn KeyValueDB>) -> Self {
 PrivateStateDB {
 db,
 }
@@ -57,7 +57,7 @@ pub struct PrivateStateStorage {

 impl PrivateStateStorage {
 /// Constructs the object
-pub fn new(db: Arc<KeyValueDB>) -> Self {
+pub fn new(db: Arc<dyn KeyValueDB>) -> Self {
 PrivateStateStorage {
 private_state_db: Arc::new(PrivateStateDB::new(db)),
 requests: RwLock::new(Vec::new()),
@@ -94,7 +94,7 @@ pub struct ClientService {
 client: Arc<Client>,
 snapshot: Arc<SnapshotService>,
 private_tx: Arc<PrivateTxService>,
-database: Arc<BlockChainDB>,
+database: Arc<dyn BlockChainDB>,
 }

 impl ClientService {
@@ -102,13 +102,13 @@ impl ClientService {
 pub fn start(
 config: ClientConfig,
 spec: &Spec,
-blockchain_db: Arc<BlockChainDB>,
+blockchain_db: Arc<dyn BlockChainDB>,
 snapshot_path: &Path,
-restoration_db_handler: Box<BlockChainDBHandler>,
+restoration_db_handler: Box<dyn BlockChainDBHandler>,
 _ipc_path: &Path,
 miner: Arc<Miner>,
-signer: Arc<Signer>,
-encryptor: Box<ethcore_private_tx::Encryptor>,
+signer: Arc<dyn Signer>,
+encryptor: Box<dyn ethcore_private_tx::Encryptor>,
 private_tx_conf: ethcore_private_tx::ProviderConfig,
 private_encryptor_conf: ethcore_private_tx::EncryptorConfig,
 ) -> Result<ClientService, EthcoreError>
@@ -173,7 +173,7 @@ impl ClientService {
 }

 /// Get general IO interface
-pub fn register_io_handler(&self, handler: Arc<IoHandler<ClientIoMessage> + Send>) -> Result<(), IoError> {
+pub fn register_io_handler(&self, handler: Arc<dyn IoHandler<ClientIoMessage> + Send>) -> Result<(), IoError> {
 self.io_service.register_handler(handler)
 }

@@ -198,12 +198,12 @@ impl ClientService {
 }

 /// Set the actor to be notified on certain chain events
-pub fn add_notify(&self, notify: Arc<ChainNotify>) {
+pub fn add_notify(&self, notify: Arc<dyn ChainNotify>) {
 self.client.add_notify(notify);
 }

 /// Get a handle to the database.
-pub fn db(&self) -> Arc<BlockChainDB> { self.database.clone() }
+pub fn db(&self) -> Arc<dyn BlockChainDB> { self.database.clone() }

 /// Shutdown the Client Service
 pub fn shutdown(&self) {
@@ -743,7 +743,7 @@ impl SyncHandler {
 Ok(())
 }

-fn on_private_state_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
+fn on_private_state_data(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
 if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
 trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id);
 return Ok(());
@@ -1382,7 +1382,7 @@ impl ChainSync {
 }

 /// Request private state from peers
-pub fn request_private_state(&mut self, io: &mut SyncIo, hash: &H256) {
+pub fn request_private_state(&mut self, io: &mut dyn SyncIo, hash: &H256) {
 let private_state_peers = self.get_private_state_peers();
 if private_state_peers.is_empty() {
 error!(target: "privatetx", "Cannot request private state, no peers with private tx enabled available");
@@ -100,7 +100,7 @@ impl SyncRequester {
 SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotManifest, GetSnapshotManifestPacket, rlp.out());
 }

-pub fn request_private_state(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, hash: &H256) {
+pub fn request_private_state(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, hash: &H256) {
 trace!(target: "privatetx", "{} <- GetPrivateStatePacket", peer_id);
 let mut rlp = RlpStream::new_list(1);
 rlp.append(hash);
@@ -356,7 +356,7 @@ impl SyncSupplier {
 }

 /// Respond to GetPrivateStatePacket
-fn return_private_state(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+fn return_private_state(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
 let hash: H256 = r.val_at(0)?;
 trace!(target: "privatetx", "{} -> GetPrivateStatePacket {:?}", peer_id, hash);
 io.private_state().map_or(Ok(None), |db| {
@@ -107,11 +107,11 @@ pub trait NodeInfo: Send + Sync {

 /// Create a new local data store, given a database, a column to write to, and a node.
 /// Attempts to read data out of the store, and move it into the node.
-pub fn create<T: NodeInfo>(db: Arc<KeyValueDB>, col: Option<u32>, node: T) -> LocalDataStore<T> {
+pub fn create<T: NodeInfo>(db: Arc<dyn KeyValueDB>, col: Option<u32>, node: T) -> LocalDataStore<T> {
 LocalDataStore {
-db: db,
-col: col,
-node: node,
+db,
+col,
+node,
 }
 }

@@ -120,7 +120,7 @@ pub fn create<T: NodeInfo>(db: Arc<KeyValueDB>, col: Option<u32>, node: T) -> Lo
 /// In specific, this will be used to store things like unpropagated local transactions
 /// and the node security level.
 pub struct LocalDataStore<T: NodeInfo> {
-db: Arc<KeyValueDB>,
+db: Arc<dyn KeyValueDB>,
 col: Option<u32>,
 node: T,
 }
@@ -70,7 +70,7 @@ pub struct Stratum {
 impl Stratum {
 pub fn start(
 addr: &SocketAddr,
-dispatcher: Arc<JobDispatcher>,
+dispatcher: Arc<dyn JobDispatcher>,
 secret: Option<H256>,
 ) -> Result<Arc<Stratum>, Error> {

@@ -128,7 +128,7 @@ struct StratumImpl {
 /// List of workers supposed to receive job update
 job_que: RwLock<HashSet<SocketAddr>>,
 /// Payload manager
-dispatcher: Arc<JobDispatcher>,
+dispatcher: Arc<dyn JobDispatcher>,
 /// Authorized workers (socket - worker_id)
 workers: Arc<RwLock<HashMap<SocketAddr, String>>>,
 /// Secret if any
@@ -199,14 +199,14 @@ mod accounts {
 }
 }

-pub fn private_tx_signer(accounts: Arc<AccountProvider>, passwords: &[Password]) -> Result<Arc<::ethcore_private_tx::Signer>, String> {
+pub fn private_tx_signer(accounts: Arc<AccountProvider>, passwords: &[Password]) -> Result<Arc<dyn (ethcore_private_tx::Signer)>, String> {
 Ok(Arc::new(self::private_tx::AccountSigner {
 accounts,
 passwords: passwords.to_vec(),
 }))
 }

-pub fn accounts_list(account_provider: Arc<AccountProvider>) -> Arc<Fn() -> Vec<Address> + Send + Sync> {
+pub fn accounts_list(account_provider: Arc<AccountProvider>) -> Arc<dyn Fn() -> Vec<Address> + Send + Sync> {
 Arc::new(move || account_provider.accounts().unwrap_or_default())
 }

@@ -244,7 +244,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {

 let client = service.client();

-let mut instream: Box<io::Read> = match cmd.file_path {
+let mut instream: Box<dyn io::Read> = match cmd.file_path {
 Some(f) => Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?),
 None => Box::new(io::stdin()),
 };
@@ -412,7 +412,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {

 let client = service.client();

-let mut instream: Box<io::Read> = match cmd.file_path {
+let mut instream: Box<dyn io::Read> = match cmd.file_path {
 Some(f) => Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?),
 None => Box::new(io::stdin()),
 };
@@ -621,7 +621,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result<(), String> {

 let client = service.client();

-let mut out: Box<io::Write> = match cmd.file_path {
+let mut out: Box<dyn io::Write> = match cmd.file_path {
 Some(f) => Box::new(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?),
 None => Box::new(io::stdout()),
 };
@@ -665,7 +665,7 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> {

 let client = service.client();

-let mut out: Box<io::Write> = match cmd.file_path {
+let mut out: Box<dyn io::Write> = match cmd.file_path {
 Some(f) => Box::new(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?),
 None => Box::new(io::stdout()),
 };
@@ -37,13 +37,13 @@ mod helpers;
 pub use self::migration::migrate;

 struct AppDB {
-key_value: Arc<KeyValueDB>,
+key_value: Arc<dyn KeyValueDB>,
 blooms: blooms_db::Database,
 trace_blooms: blooms_db::Database,
 }

 impl BlockChainDB for AppDB {
-fn key_value(&self) -> &Arc<KeyValueDB> {
+fn key_value(&self) -> &Arc<dyn KeyValueDB> {
 &self.key_value
 }

@@ -58,7 +58,7 @@ impl BlockChainDB for AppDB {

 /// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path.
 #[cfg(feature = "secretstore")]
-pub fn open_secretstore_db(data_path: &str) -> Result<Arc<KeyValueDB>, String> {
+pub fn open_secretstore_db(data_path: &str) -> Result<Arc<dyn KeyValueDB>, String> {
 use std::path::PathBuf;

 let mut db_path = PathBuf::from(data_path);
@@ -68,7 +68,7 @@ pub fn open_secretstore_db(data_path: &str) -> Result<Arc<KeyValueDB>, String> {
 }

 /// Create a restoration db handler using the config generated by `client_path` and `client_config`.
-pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) -> Box<BlockChainDBHandler> {
+pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) -> Box<dyn BlockChainDBHandler> {
 let client_db_config = helpers::client_db_config(client_path, client_config);

 struct RestorationDBHandler {
@@ -76,7 +76,7 @@ pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig)
 }

 impl BlockChainDBHandler for RestorationDBHandler {
-fn open(&self, db_path: &Path) -> io::Result<Arc<BlockChainDB>> {
+fn open(&self, db_path: &Path) -> io::Result<Arc<dyn BlockChainDB>> {
 open_database(&db_path.to_string_lossy(), &self.config)
 }
 }
@@ -87,7 +87,11 @@ pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig)
 }

 /// Open a new main DB.
-pub fn open_db(client_path: &str, cache_config: &CacheConfig, compaction: &DatabaseCompactionProfile) -> io::Result<Arc<BlockChainDB>> {
+pub fn open_db(
+client_path: &str,
+cache_config: &CacheConfig,
+compaction: &DatabaseCompactionProfile
+) -> io::Result<Arc<dyn BlockChainDB>> {
 let path = Path::new(client_path);

 let db_config = DatabaseConfig {
@@ -99,7 +103,7 @@ pub fn open_db(client_path: &str, cache_config: &CacheConfig, compaction: &Datab
 open_database(client_path, &db_config)
 }

-pub fn open_database(client_path: &str, config: &DatabaseConfig) -> io::Result<Arc<BlockChainDB>> {
+pub fn open_database(client_path: &str, config: &DatabaseConfig) -> io::Result<Arc<dyn BlockChainDB>> {
 let path = Path::new(client_path);

 let blooms_path = path.join("blooms");
@@ -124,8 +124,8 @@ pub trait InformantData: Send + Sync {
 /// Informant data for a full node.
 pub struct FullNodeInformantData {
 pub client: Arc<Client>,
-pub sync: Option<Arc<SyncProvider>>,
-pub net: Option<Arc<ManageNetwork>>,
+pub sync: Option<Arc<dyn SyncProvider>>,
+pub net: Option<Arc<dyn ManageNetwork>>,
 }

 impl InformantData for FullNodeInformantData {
@@ -180,7 +180,7 @@ impl InformantData for FullNodeInformantData {

 /// Informant data for a light node -- note that the network is required.
 pub struct LightNodeInformantData {
-pub client: Arc<LightChainClient>,
+pub client: Arc<dyn LightChainClient>,
 pub sync: Arc<LightSync>,
 pub cache: Arc<Mutex<LightDataCache>>,
 }
@@ -40,7 +40,7 @@ impl Default for Configuration {
 }
 }

-pub fn start_server(conf: Configuration, client: Arc<BlockChainClient>) -> Result<Option<Listening>, ServerError> {
+pub fn start_server(conf: Configuration, client: Arc<dyn BlockChainClient>) -> Result<Option<Listening>, ServerError> {
 if !conf.enabled {
 return Ok(None);
 }
@@ -35,7 +35,7 @@ use ethereum_types::H256;

 const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed";

-type BoxFuture<T, E> = Box<Future<Item = T, Error = E>>;
+type BoxFuture<T, E> = Box<dyn Future<Item = T, Error = E>>;

 /// Allows on-demand fetch of data useful for the light client.
 pub struct EpochFetch {
@@ -83,7 +83,7 @@ impl ChainDataFetcher for EpochFetch {
 }

 /// Fetch epoch transition proof at given header.
-fn epoch_transition(&self, hash: H256, engine: Arc<Engine>, checker: Arc<StateDependentProof>)
+fn epoch_transition(&self, hash: H256, engine: Arc<dyn Engine>, checker: Arc<dyn StateDependentProof>)
 -> Self::Transition
 {
 self.request(request::Signal {
@@ -28,9 +28,9 @@ pub use ethcore::client::ChainNotify;
 use ethcore_logger::Config as LogConfig;

 pub type SyncModules = (
-Arc<SyncProvider>,
-Arc<ManageNetwork>,
-Arc<ChainNotify>,
+Arc<dyn SyncProvider>,
+Arc<dyn ManageNetwork>,
+Arc<dyn ChainNotify>,
 mpsc::Sender<sync::PriorityTask>,
 );

@@ -38,13 +38,13 @@ pub fn sync(
 config: SyncConfig,
 executor: Executor,
 network_config: NetworkConfiguration,
-chain: Arc<BlockChainClient>,
-snapshot_service: Arc<SnapshotService>,
-private_tx_handler: Option<Arc<PrivateTxHandler>>,
+chain: Arc<dyn BlockChainClient>,
+snapshot_service: Arc<dyn SnapshotService>,
+private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
 private_state: Option<Arc<PrivateStateDB>>,
-provider: Arc<Provider>,
+provider: Arc<dyn Provider>,
 _log_settings: &LogConfig,
-connection_filter: Option<Arc<ConnectionFilter>>,
+connection_filter: Option<Arc<dyn ConnectionFilter>>,
 ) -> Result<SyncModules, sync::Error> {
 let eth_sync = EthSync::new(Params {
 config,
@@ -59,9 +59,9 @@ pub fn sync(
 connection_filter)?;

 Ok((
-eth_sync.clone() as Arc<SyncProvider>,
-eth_sync.clone() as Arc<ManageNetwork>,
-eth_sync.clone() as Arc<ChainNotify>,
+eth_sync.clone() as Arc<dyn SyncProvider>,
+eth_sync.clone() as Arc<dyn ManageNetwork>,
+eth_sync.clone() as Arc<dyn ChainNotify>,
 eth_sync.priority_tasks()
 ))
 }
@@ -235,16 +235,16 @@ pub trait Dependencies {
 pub struct FullDependencies {
 pub signer_service: Arc<SignerService>,
 pub client: Arc<Client>,
-pub snapshot: Arc<SnapshotService>,
-pub sync: Arc<SyncProvider>,
-pub net: Arc<ManageNetwork>,
+pub snapshot: Arc<dyn SnapshotService>,
+pub sync: Arc<dyn SyncProvider>,
+pub net: Arc<dyn ManageNetwork>,
 pub accounts: Arc<AccountProvider>,
 pub private_tx_service: Option<Arc<PrivateTxService>>,
 pub miner: Arc<Miner>,
 pub external_miner: Arc<ExternalMiner>,
 pub logger: Arc<RotatingLogger>,
 pub settings: Arc<NetworkSettings>,
-pub net_service: Arc<ManageNetwork>,
+pub net_service: Arc<dyn ManageNetwork>,
 pub updater: Arc<Updater>,
 pub geth_compatibility: bool,
 pub experimental_rpcs: bool,
@@ -485,7 +485,7 @@ pub struct LightDependencies<T> {
 pub signer_service: Arc<SignerService>,
 pub client: Arc<T>,
 pub sync: Arc<LightSync>,
-pub net: Arc<ManageNetwork>,
+pub net: Arc<dyn ManageNetwork>,
 pub accounts: Arc<AccountProvider>,
 pub logger: Arc<RotatingLogger>,
 pub settings: Arc<NetworkSettings>,
@@ -583,7 +583,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
 // take handle to private transactions service
 let private_tx_service = service.private_tx_service();
 let private_tx_provider = private_tx_service.provider();
-let connection_filter = connection_filter_address.map(|a| Arc::new(NodeFilter::new(Arc::downgrade(&client) as Weak<BlockChainClient>, a)));
+let connection_filter = connection_filter_address.map(|a| Arc::new(NodeFilter::new(Arc::downgrade(&client) as Weak<dyn BlockChainClient>, a)));
 let snapshot_service = service.snapshot_service();
 if let Some(filter) = connection_filter.clone() {
 service.add_notify(filter.clone());
@@ -636,7 +636,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
 }

 let (private_tx_sync, private_state) = match cmd.private_tx_enabled {
-true => (Some(private_tx_service.clone() as Arc<PrivateTxHandler>), Some(private_tx_provider.private_state_db())),
+true => (Some(private_tx_service.clone() as Arc<dyn PrivateTxHandler>), Some(private_tx_provider.private_state_db())),
 false => (None, None),
 };

@@ -651,7 +651,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
 private_state,
 client.clone(),
 &cmd.logger_config,
-connection_filter.clone().map(|f| f as Arc<::sync::ConnectionFilter + 'static>),
+connection_filter.clone().map(|f| f as Arc<dyn sync::ConnectionFilter + 'static>),
 ).map_err(|e| format!("Sync error: {}", e))?;

 service.add_notify(chain_notify.clone());
@@ -706,7 +706,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
 // the updater service
 let updater_fetch = fetch.clone();
 let updater = Updater::new(
-&Arc::downgrade(&(service.client() as Arc<BlockChainClient>)),
+&Arc::downgrade(&(service.client() as Arc<dyn BlockChainClient>)),
 &Arc::downgrade(&sync_provider),
 update_policy,
 hash_fetch::Client::with_fetch(contract_client.clone(), updater_fetch, runtime.executor())
@@ -843,14 +843,14 @@ enum RunningClientInner {
 rpc: jsonrpc_core::MetaIoHandler<Metadata, informant::Middleware<rpc_apis::LightClientNotifier>>,
 informant: Arc<Informant<LightNodeInformantData>>,
 client: Arc<LightClient>,
-keep_alive: Box<Any>,
+keep_alive: Box<dyn Any>,
 },
 Full {
 rpc: jsonrpc_core::MetaIoHandler<Metadata, informant::Middleware<informant::ClientNotifier>>,
 informant: Arc<Informant<FullNodeInformantData>>,
 client: Arc<Client>,
 client_service: Arc<ClientService>,
-keep_alive: Box<Any>,
+keep_alive: Box<dyn Any>,
 },
 }

@@ -93,7 +93,7 @@ pub struct Dependencies<'a> {
 /// Blockchain client.
 pub client: Arc<Client>,
 /// Sync provider.
-pub sync: Arc<SyncProvider>,
+pub sync: Arc<dyn SyncProvider>,
 /// Miner service.
 pub miner: Arc<Miner>,
 /// Account provider.
@@ -64,7 +64,6 @@ parity-updater = { path = "../updater" }
 parity-version = { path = "../util/version" }
 rlp = "0.4.0"
 account-state = { path = "../ethcore/account-state" }
-spec = { path = "../ethcore/spec" }
 stats = { path = "../util/stats" }
 trace = { path = "../ethcore/trace" }
 vm = { path = "../ethcore/vm" }
@@ -77,6 +76,7 @@ ethcore-io = { path = "../util/io" }
 ethcore-network = { path = "../util/network" }
 fake-fetch = { path = "../util/fake-fetch" }
 macros = { path = "../util/macros" }
+spec = { path = "../ethcore/spec" }
 pretty_assertions = "0.1"
 transaction-pool = "2.0"

@@ -85,7 +85,7 @@ extern crate parity_version as version;
 extern crate eip_712;
 extern crate rlp;
 extern crate account_state;
-extern crate spec;

 extern crate stats;
 extern crate tempdir;
 extern crate trace;
@@ -127,6 +127,9 @@ extern crate fake_fetch;
 #[cfg(test)]
 extern crate ethcore_io as io;
+
+#[cfg(test)]
+extern crate spec;

 pub extern crate jsonrpc_ws_server as ws;

 mod authcodes;
@@ -38,7 +38,7 @@ const PRINT_INTERVAL: Duration = Duration::from_secs(60);
 pub struct DeprecationNotice<T = fn() -> Instant> {
 now: T,
 next_warning_at: RwLock<HashMap<String, Instant>>,
-printer: Box<Fn(MethodName, Option<&str>) + Send + Sync>,
+printer: Box<dyn Fn(MethodName, Option<&str>) + Send + Sync>,
 }

 impl Default for DeprecationNotice {
@@ -118,7 +118,7 @@ impl<C: miner::BlockChainClient + BlockChainClient, M: MinerService> Dispatcher
 fn sign<P>(
 &self,
 filled: FilledTransactionRequest,
-signer: &Arc<Accounts>,
+signer: &Arc<dyn Accounts>,
 password: SignWith,
 post_sign: P,
 ) -> BoxFuture<P::Item>
@@ -45,7 +45,7 @@ where
 /// Sync service.
 pub sync: Arc<S>,
 /// Header chain client.
-pub client: Arc<LightChainClient>,
+pub client: Arc<dyn LightChainClient>,
 /// On-demand request service.
 pub on_demand: Arc<OD>,
 /// Data cache.
@@ -68,7 +68,7 @@ where
 /// For correct operation, the OnDemand service is assumed to be registered as a network handler,
 pub fn new(
 sync: Arc<S>,
-client: Arc<LightChainClient>,
+client: Arc<dyn LightChainClient>,
 on_demand: Arc<OD>,
 cache: Arc<Mutex<LightDataCache>>,
 transaction_queue: Arc<RwLock<LightTransactionQueue>>,
@@ -215,7 +215,7 @@ where
 fn sign<P>(
 &self,
 filled: FilledTransactionRequest,
-signer: &Arc<Accounts>,
+signer: &Arc<dyn Accounts>,
 password: SignWith,
 post_sign: P
 ) -> BoxFuture<P::Item>
@@ -248,7 +248,7 @@ where
 // TODO: this could be `impl Trait`.
 pub fn fetch_gas_price_corpus<S, OD>(
 sync: Arc<S>,
-client: Arc<LightChainClient>,
+client: Arc<dyn LightChainClient>,
 on_demand: Arc<OD>,
 cache: Arc<Mutex<LightDataCache>>,
 ) -> BoxFuture<Corpus<U256>>
@@ -111,7 +111,7 @@ pub trait Dispatcher: Send + Sync + Clone {
 fn sign<P>(
 &self,
 filled: FilledTransactionRequest,
-signer: &Arc<Accounts>,
+signer: &Arc<dyn Accounts>,
 password: SignWith,
 post_sign: P,
 ) -> BoxFuture<P::Item> where
@@ -277,7 +277,7 @@ impl<T: Debug> From<(T, Option<AccountToken>)> for WithToken<T> {
 /// Execute a confirmation payload.
 pub fn execute<D: Dispatcher + 'static>(
 dispatcher: D,
-signer: &Arc<Accounts>,
+signer: &Arc<dyn Accounts>,
 payload: ConfirmationPayload,
 pass: SignWith
 ) -> BoxFuture<WithToken<ConfirmationResponse>> {
@@ -32,7 +32,7 @@ enum ProspectiveSignerState {
 }

 pub struct ProspectiveSigner<P: PostSign> {
-signer: Arc<Accounts>,
+signer: Arc<dyn Accounts>,
 filled: FilledTransactionRequest,
 chain_id: Option<u64>,
 reserved: nonce::Reserved,
@@ -46,7 +46,7 @@ pub struct ProspectiveSigner<P: PostSign> {

 impl<P: PostSign> ProspectiveSigner<P> {
 pub fn new(
-signer: Arc<Accounts>,
+signer: Arc<dyn Accounts>,
 filled: FilledTransactionRequest,
 chain_id: Option<u64>,
 reserved: nonce::Reserved,
@@ -30,7 +30,7 @@ pub use self::signing_queue::QueueEvent;
 pub struct SignerService {
 is_enabled: bool,
 queue: Arc<ConfirmationsQueue>,
-generate_new_token: Box<Fn() -> Result<String, String> + Send + Sync + 'static>,
+generate_new_token: Box<dyn Fn() -> Result<String, String> + Send + Sync + 'static>,
 }

 impl SignerService {
@@ -96,7 +96,7 @@ pub type ConfirmationReceiver = oneshot::Receiver<ConfirmationResult>;
 pub struct ConfirmationsQueue {
 id: Mutex<U256>,
 queue: RwLock<BTreeMap<U256, ConfirmationSender>>,
-on_event: RwLock<Vec<Box<Fn(QueueEvent) -> () + Send + Sync>>>,
+on_event: RwLock<Vec<Box<dyn Fn(QueueEvent) -> () + Send + Sync>>>,
 }

 impl ConfirmationsQueue {
@@ -82,7 +82,7 @@ where
 OD: OnDemandRequester + 'static
 {
 /// The light client.
-pub client: Arc<LightChainClient>,
+pub client: Arc<dyn LightChainClient>,
 /// The on-demand request service.
 pub on_demand: Arc<OD>,
 /// Handle to the network.
@@ -585,7 +585,7 @@ where

 match maybe_future {
 Some(recv) => recv,
-None => Box::new(future::err(errors::network_disabled())) as Box<Future<Item = _, Error = _> + Send>
+None => Box::new(future::err(errors::network_disabled())) as Box<dyn Future<Item = _, Error = _> + Send>
 }
 }

@@ -741,7 +741,7 @@ where
 tx: EthTransaction,
 hdr: encoded::Header,
 env_info: ::vm::EnvInfo,
-engine: Arc<engine::Engine>,
+engine: Arc<dyn engine::Engine>,
 on_demand: Arc<OD>,
 sync: Arc<S>,
 }
@@ -806,7 +806,7 @@ where
 failed => Ok(future::Loop::Break(failed)),
 }
 })
-})) as Box<Future<Item = _, Error = _> + Send>
+})) as Box<dyn Future<Item = _, Error = _> + Send>
 } else {
 trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand",
 params.tx.gas);
@@ -827,8 +827,8 @@ where
 });

 match proved_future {
-Some(fut) => Box::new(fut) as Box<Future<Item = _, Error = _> + Send>,
-None => Box::new(future::err(errors::network_disabled())) as Box<Future<Item = _, Error = _> + Send>,
+Some(fut) => Box::new(fut) as Box<dyn Future<Item = _, Error = _> + Send>,
+None => Box::new(future::err(errors::network_disabled())) as Box<dyn Future<Item = _, Error = _> + Send>,
 }
 }
 }
@@ -87,7 +87,7 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
 }).is_some()
 }

-pub fn tick(&self) -> Box<Future<Item=(), Error=()> + Send> {
+pub fn tick(&self) -> Box<dyn Future<Item=(), Error=()> + Send> {
 let mut futures = Vec::new();
 // poll all subscriptions
 for (id, subscription) in self.subscribers.iter() {
@@ -113,7 +113,7 @@ pub struct EthClient<C, SN: ?Sized, S: ?Sized, M, EM> where
 client: Arc<C>,
 snapshot: Arc<SN>,
 sync: Arc<S>,
-accounts: Arc<Fn() -> Vec<Address> + Send + Sync>,
+accounts: Arc<dyn Fn() -> Vec<Address> + Send + Sync>,
 miner: Arc<M>,
 external_miner: Arc<EM>,
 seed_compute: Mutex<SeedHashCompute>,
@@ -193,7 +193,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM, T: StateInfo + 'static> EthClient<C, SN, S
 client: &Arc<C>,
 snapshot: &Arc<SN>,
 sync: &Arc<S>,
-accounts: &Arc<Fn() -> Vec<Address> + Send + Sync>,
+accounts: &Arc<dyn Fn() -> Vec<Address> + Send + Sync>,
 miner: &Arc<M>,
 em: &Arc<EM>,
 options: EthClientOptions
@@ -449,8 +449,8 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM, T: StateInfo + 'static> EthClient<C, SN, S

 self.miner
 .pending_state(info.best_block_number)
-.map(|s| Box::new(s) as Box<StateInfo>)
-.unwrap_or(Box::new(self.client.latest_state()) as Box<StateInfo>)
+.map(|s| Box::new(s) as Box<dyn StateInfo>)
+.unwrap_or(Box::new(self.client.latest_state()) as Box<dyn StateInfo>)
 .into()
 }
 }
@@ -137,7 +137,7 @@ where
 {
 /// Creates a new `EthPubSubClient` for `LightClient`.
 pub fn light(
-client: Arc<LightChainClient>,
+client: Arc<dyn LightChainClient>,
 on_demand: Arc<OD>,
 sync: Arc<S>,
 cache: Arc<Mutex<Cache>>,
@@ -58,7 +58,7 @@ pub struct EthClient<C, S: LightSyncProvider + LightNetworkDispatcher + 'static,
 client: Arc<C>,
 on_demand: Arc<OD>,
 transaction_queue: Arc<RwLock<TransactionQueue>>,
-accounts: Arc<Fn() -> Vec<Address> + Send + Sync>,
+accounts: Arc<dyn Fn() -> Vec<Address> + Send + Sync>,
 cache: Arc<Mutex<LightDataCache>>,
 polls: Mutex<PollManager<SyncPollFilter>>,
 poll_lifetime: u32,
@@ -101,7 +101,7 @@ where
 client: Arc<C>,
 on_demand: Arc<OD>,
 transaction_queue: Arc<RwLock<TransactionQueue>>,
-accounts: Arc<Fn() -> Vec<Address> + Send + Sync>,
+accounts: Arc<dyn Fn() -> Vec<Address> + Send + Sync>,
 cache: Arc<Mutex<LightDataCache>>,
 gas_price_percentile: usize,
 poll_lifetime: u32
@@ -34,14 +34,14 @@ use v1::types::{Bytes, ReleaseInfo, Transaction};

 /// Parity-specific rpc interface for operations altering the settings.
 pub struct ParitySetClient<F> {
-client: Arc<LightChainClient>,
-net: Arc<ManageNetwork>,
+client: Arc<dyn LightChainClient>,
+net: Arc<dyn ManageNetwork>,
 fetch: F,
 }

 impl<F: Fetch> ParitySetClient<F> {
 /// Creates new `ParitySetClient` with given `Fetch`.
-pub fn new(client: Arc<LightChainClient>, net: Arc<ManageNetwork>, fetch: F) -> Self {
+pub fn new(client: Arc<dyn LightChainClient>, net: Arc<dyn ManageNetwork>, fetch: F) -> Self {
 ParitySetClient {
 client,
 net,
@@ -59,13 +59,13 @@ pub struct ParityClient<C, M, U> {
 client: Arc<C>,
 miner: Arc<M>,
 updater: Arc<U>,
-sync: Arc<SyncProvider>,
-net: Arc<ManageNetwork>,
+sync: Arc<dyn SyncProvider>,
+net: Arc<dyn ManageNetwork>,
 logger: Arc<RotatingLogger>,
 settings: Arc<NetworkSettings>,
 signer: Option<Arc<SignerService>>,
 ws_address: Option<Host>,
-snapshot: Option<Arc<SnapshotService>>,
+snapshot: Option<Arc<dyn SnapshotService>>,
 }

 impl<C, M, U> ParityClient<C, M, U> where
@@ -75,14 +75,14 @@ impl<C, M, U> ParityClient<C, M, U> where
 pub fn new(
 client: Arc<C>,
 miner: Arc<M>,
-sync: Arc<SyncProvider>,
+sync: Arc<dyn SyncProvider>,
 updater: Arc<U>,
-net: Arc<ManageNetwork>,
+net: Arc<dyn ManageNetwork>,
 logger: Arc<RotatingLogger>,
 settings: Arc<NetworkSettings>,
 signer: Option<Arc<SignerService>>,
 ws_address: Option<Host>,
-snapshot: Option<Arc<SnapshotService>>,
+snapshot: Option<Arc<dyn SnapshotService>>,
 ) -> Self {
 ParityClient {
 client,
@@ -87,7 +87,7 @@ pub struct ParitySetClient<C, M, U, F = fetch::Client> {
 client: Arc<C>,
 miner: Arc<M>,
 updater: Arc<U>,
-net: Arc<ManageNetwork>,
+net: Arc<dyn ManageNetwork>,
 fetch: F,
 }

@@ -99,7 +99,7 @@ impl<C, M, U, F> ParitySetClient<C, M, U, F>
 client: &Arc<C>,
 miner: &Arc<M>,
 updater: &Arc<U>,
-net: &Arc<ManageNetwork>,
+net: &Arc<dyn ManageNetwork>,
 fetch: F,
 ) -> Self {
 ParitySetClient {
@@ -40,7 +40,7 @@ use v1::types::{TransactionModification, ConfirmationRequest, ConfirmationRespon
 /// Transactions confirmation (personal) rpc implementation.
 pub struct SignerClient<D: Dispatcher> {
 signer: Arc<SignerService>,
-accounts: Arc<dispatch::Accounts>,
+accounts: Arc<dyn dispatch::Accounts>,
 dispatcher: D,
 subscribers: Arc<Mutex<Subscribers<Sink<Vec<ConfirmationRequest>>>>>,
 deprecation_notice: DeprecationNotice,
@@ -49,7 +49,7 @@ pub struct SignerClient<D: Dispatcher> {
 impl<D: Dispatcher + 'static> SignerClient<D> {
 /// Create new instance of signer client.
 pub fn new(
-accounts: Arc<dispatch::Accounts>,
+accounts: Arc<dyn dispatch::Accounts>,
 dispatcher: D,
 signer: &Arc<SignerService>,
 executor: Executor,
@@ -81,7 +81,7 @@ impl<D: Dispatcher + 'static> SignerClient<D> {
 }

 fn confirm_internal<F, T>(&self, id: U256, modification: TransactionModification, f: F) -> BoxFuture<WithToken<ConfirmationResponse>> where
-F: FnOnce(D, &Arc<dispatch::Accounts>, ConfirmationPayload) -> T,
+F: FnOnce(D, &Arc<dyn dispatch::Accounts>, ConfirmationPayload) -> T,
 T: IntoFuture<Item=WithToken<ConfirmationResponse>, Error=Error>,
 T::Future: Send + 'static
 {
@@ -91,7 +91,7 @@ fn schedule(executor: Executor,
 /// Implementation of functions that require signing when no trusted signer is used.
 pub struct SigningQueueClient<D> {
 signer: Arc<SignerService>,
-accounts: Arc<dispatch::Accounts>,
+accounts: Arc<dyn dispatch::Accounts>,
 dispatcher: D,
 executor: Executor,
 // None here means that the request hasn't yet been confirmed
@@ -101,7 +101,7 @@ pub struct SigningQueueClient<D> {

 impl<D: Dispatcher + 'static> SigningQueueClient<D> {
 /// Creates a new signing queue client given shared signing queue.
-pub fn new(signer: &Arc<SignerService>, dispatcher: D, executor: Executor, accounts: &Arc<dispatch::Accounts>) -> Self {
+pub fn new(signer: &Arc<SignerService>, dispatcher: D, executor: Executor, accounts: &Arc<dyn dispatch::Accounts>) -> Self {
 SigningQueueClient {
 signer: signer.clone(),
 accounts: accounts.clone(),
@@ -37,14 +37,14 @@ use v1::types::{

 /// Implementation of functions that require signing when no trusted signer is used.
 pub struct SigningUnsafeClient<D> {
-accounts: Arc<dispatch::Accounts>,
+accounts: Arc<dyn dispatch::Accounts>,
 dispatcher: D,
 deprecation_notice: DeprecationNotice,
 }

 impl<D: Dispatcher + 'static> SigningUnsafeClient<D> {
 /// Creates new SigningUnsafeClient.
-pub fn new(accounts: &Arc<dispatch::Accounts>, dispatcher: D) -> Self {
+pub fn new(accounts: &Arc<dyn dispatch::Accounts>, dispatcher: D) -> Self {
 SigningUnsafeClient {
 accounts: accounts.clone(),
 dispatcher,
@@ -55,7 +55,7 @@ pub struct TestMinerService {
 /// Minimum gas price
 pub min_gas_price: RwLock<Option<U256>>,
 /// Signer (if any)
-pub signer: RwLock<Option<Box<EngineSigner>>>,
+pub signer: RwLock<Option<Box<dyn EngineSigner>>>,

 authoring_params: RwLock<AuthoringParams>,
 }
@@ -102,7 +102,7 @@ impl StateClient for TestMinerService {
 }

 impl EngineInfo for TestMinerService {
-fn engine(&self) -> &Engine {
+fn engine(&self) -> &dyn Engine {
 unimplemented!()
 }
 }
@@ -31,5 +31,5 @@ impl ManageNetwork for TestManageNetwork {
 fn start_network(&self) {}
 fn stop_network(&self) {}
 fn num_peers_range(&self) -> RangeInclusive<u32> { 25..=50 }
-fn with_proto_context(&self, _: ProtocolId, _: &mut FnMut(&NetworkContext)) { }
+fn with_proto_context(&self, _: ProtocolId, _: &mut dyn FnMut(&dyn NetworkContext)) { }
 }
@@ -45,7 +45,7 @@ pub struct Dependencies {
 pub updater: Arc<TestUpdater>,
 pub logger: Arc<RotatingLogger>,
 pub settings: Arc<NetworkSettings>,
-pub network: Arc<ManageNetwork>,
+pub network: Arc<dyn ManageNetwork>,
 pub ws_address: Option<Host>,
 }

@@ -58,7 +58,7 @@ fn parity_set_client(
 client,
 miner,
 updater,
-&(net.clone() as Arc<ManageNetwork>),
+&(net.clone() as Arc<dyn ManageNetwork>),
 FakeFetch::new(Some(1)),
 )
 }
@@ -19,7 +19,7 @@ use serde::{Deserialize, Deserializer};
 use serde::de::{Error, Visitor};

 use ethereum_types::H256;
 use ethstore;

 /// Type of derivation
 pub enum DerivationType {
@@ -37,7 +37,7 @@ pub trait HashFetch: Send + Sync + 'static {
 /// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch)
 ///
 /// This function may fail immediately when fetch cannot be initialized or content cannot be resolved.
-fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box<Fn(Result<PathBuf, Error>) + Send>);
+fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box<dyn Fn(Result<PathBuf, Error>) + Send>);
 }

 /// Hash-fetching error.
@@ -111,12 +111,12 @@ pub struct Client<F: Fetch + 'static = fetch::Client> {
 contract: URLHintContract,
 fetch: F,
 executor: Executor,
-random_path: Arc<Fn() -> PathBuf + Sync + Send>,
+random_path: Arc<dyn Fn() -> PathBuf + Sync + Send>,
 }

 impl<F: Fetch + 'static> Client<F> {
 /// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
-pub fn with_fetch(contract: Arc<RegistrarClient<Call=Asynchronous>>, fetch: F, executor: Executor) -> Self {
+pub fn with_fetch(contract: Arc<dyn RegistrarClient<Call=Asynchronous>>, fetch: F, executor: Executor) -> Self {
 Client {
 contract: URLHintContract::new(contract),
 fetch: fetch,
@@ -127,7 +127,7 @@ impl<F: Fetch + 'static> Client<F> {
 }

 impl<F: Fetch + 'static> HashFetch for Client<F> {
-fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
+fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box<dyn Fn(Result<PathBuf, Error>) + Send>) {
 debug!(target: "fetch", "Fetching: {:?}", hash);

 let random_path = self.random_path.clone();
@@ -35,7 +35,7 @@ extern crate registrar;

 pub extern crate fetch;

-#[macro_use]
+// #[macro_use]
 extern crate ethabi_derive;
 #[macro_use]
 extern crate ethabi_contract;
@@ -95,18 +95,18 @@ pub enum URLHintResult {
 /// URLHint Contract interface
 pub trait URLHint: Send + Sync {
 /// Resolves given id to registrar entry.
-fn resolve(&self, id: H256) -> Box<Future<Item = Option<URLHintResult>, Error = String> + Send>;
+fn resolve(&self, id: H256) -> Box<dyn Future<Item = Option<URLHintResult>, Error = String> + Send>;
 }

 /// `URLHintContract` API
 pub struct URLHintContract {
 registrar: Registrar,
-client: Arc<RegistrarClient<Call=Asynchronous>>,
+client: Arc<dyn RegistrarClient<Call=Asynchronous>>,
 }

 impl URLHintContract {
 /// Creates new `URLHintContract`
-pub fn new(client: Arc<RegistrarClient<Call=Asynchronous>>) -> Self {
+pub fn new(client: Arc<dyn RegistrarClient<Call=Asynchronous>>) -> Self {
 URLHintContract {
 registrar: Registrar::new(client.clone()),
 client: client,
@@ -159,7 +159,7 @@ fn decode_urlhint_output(output: (String, [u8; 20], Address)) -> Option<URLHintR
 }

 impl URLHint for URLHintContract {
-fn resolve(&self, id: H256) -> Box<Future<Item = Option<URLHintResult>, Error = String> + Send> {
+fn resolve(&self, id: H256) -> Box<dyn Future<Item = Option<URLHintResult>, Error = String> + Send> {
 let client = self.client.clone();

 let future = self.registrar.get_address(GITHUB_HINT)
@@ -21,6 +21,7 @@
 extern crate client_traits;
 extern crate common_types;
 extern crate ethabi;
+extern crate ethabi_derive;
 extern crate ethcore;
 extern crate ethcore_sync as sync;
 extern crate ethereum_types;
@@ -37,8 +38,6 @@ extern crate target_info;
 #[macro_use]
 extern crate ethabi_contract;
-#[macro_use]
-extern crate ethabi_derive;
 #[macro_use]
 extern crate lazy_static;
 #[macro_use]
 extern crate log;
@@ -144,11 +144,11 @@ pub struct Updater<O = OperationsContractClient, F = fetch::Client, T = StdTimeP
 // Useful environmental stuff.
 update_policy: UpdatePolicy,
 weak_self: Mutex<Weak<Updater<O, F, T, R>>>,
-client: Weak<BlockChainClient>,
-sync: Option<Weak<SyncProvider>>,
+client: Weak<dyn BlockChainClient>,
+sync: Option<Weak<dyn SyncProvider>>,
 fetcher: F,
 operations_client: O,
-exit_handler: Mutex<Option<Box<Fn() + 'static + Send>>>,
+exit_handler: Mutex<Option<Box<dyn Fn() + 'static + Send>>>,

 time_provider: T,
 rng: R,
@@ -205,11 +205,11 @@ pub trait OperationsClient: Send + Sync + 'static {

 /// `OperationsClient` that delegates calls to the operations contract.
 pub struct OperationsContractClient {
-client: Weak<BlockChainClient>,
+client: Weak<dyn BlockChainClient>,
 }

 impl OperationsContractClient {
-fn new(client: Weak<BlockChainClient>) -> Self {
+fn new(client: Weak<dyn BlockChainClient>) -> Self {
 OperationsContractClient {
 client
 }
@@ -368,8 +368,8 @@ impl GenRange for ThreadRngGenRange {
 impl Updater {
 /// `Updater` constructor
 pub fn new(
-client: &Weak<BlockChainClient>,
-sync: &Weak<SyncProvider>,
+client: &Weak<dyn BlockChainClient>,
+sync: &Weak<dyn SyncProvider>,
 update_policy: UpdatePolicy,
 fetcher: fetch::Client,
 ) -> Arc<Updater> {
@@ -756,7 +756,7 @@ pub mod tests {

 #[derive(Clone)]
 struct FakeFetch {
-on_done: Arc<Mutex<Option<Box<Fn(Result<PathBuf, Error>) + Send>>>>,
+on_done: Arc<Mutex<Option<Box<dyn Fn(Result<PathBuf, Error>) + Send>>>>,
 }

 impl FakeFetch {
@@ -772,7 +772,7 @@ pub mod tests {
 }

 impl HashFetch for FakeFetch {
-fn fetch(&self, _hash: H256, _abort: fetch::Abort, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
+fn fetch(&self, _hash: H256, _abort: fetch::Abort, on_done: Box<dyn Fn(Result<PathBuf, Error>) + Send>) {
 *self.on_done.lock() = Some(on_done);
 }
 }
@@ -67,7 +67,7 @@ pub(crate) fn serde_error(expected: &str, field: Option<&str>) -> ErrorKind {
 }

 impl Fail for Error {
-fn cause(&self) -> Option<&Fail> {
+fn cause(&self) -> Option<&dyn Fail> {
 self.inner.cause()
 }

@ -271,7 +271,7 @@ impl Client {
|
||||
}
|
||||
|
||||
impl Fetch for Client {
|
||||
type Result = Box<Future<Item=Response, Error=Error> + Send + 'static>;
|
||||
type Result = Box<dyn Future<Item=Response, Error=Error> + Send + 'static>;
|
||||
|
||||
fn fetch(&self, request: Request, abort: Abort) -> Self::Result {
|
||||
debug!(target: "fetch", "fetching: {:?}", request.url());
|
||||
@ -608,7 +608,7 @@ impl fmt::Display for Error {
|
||||
|
||||
impl ::std::error::Error for Error {
|
||||
fn description(&self) -> &str { "Fetch client error" }
|
||||
fn cause(&self) -> Option<&::std::error::Error> { None }
|
||||
fn cause(&self) -> Option<&dyn std::error::Error> { None }
|
||||
}
|
||||
|
||||
impl From<hyper::Error> for Error {
|
||||
|
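Error-trait impls change the same way: `cause`/`source` hand back a borrowed trait object, so the return type is now `Option<&dyn ...>`. A small std-only example of the updated signature; `WrapError` is invented for illustration:

use std::error::Error;
use std::fmt;

// Invented error type; the real ones live in the fetch/network crates.
#[derive(Debug)]
struct WrapError {
    inner: fmt::Error,
}

impl fmt::Display for WrapError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "wrapped: {}", self.inner)
    }
}

impl Error for WrapError {
    // Post-fix spelling: the returned reference is explicitly a `dyn` trait object.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.inner)
    }
}

fn main() {
    let err = WrapError { inner: fmt::Error };
    println!("{} (source: {:?})", err, err.source().map(|e| e.to_string()));
}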
@ -32,7 +32,7 @@ use std::{fs, io, error};
use kvdb::DBTransaction;
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};

fn other_io_err<E>(e: E) -> io::Error where E: Into<Box<error::Error + Send + Sync>> {
fn other_io_err<E>(e: E) -> io::Error where E: Into<Box<dyn error::Error + Send + Sync>> {
io::Error::new(io::ErrorKind::Other, e)
}

@ -209,7 +209,7 @@ impl TempIndex {
/// Manages database migration.
pub struct Manager {
config: Config,
migrations: Vec<Box<Migration>>,
migrations: Vec<Box<dyn Migration>>,
}

impl Manager {

@ -317,7 +317,7 @@ impl Manager {
}

/// Find all needed migrations.
fn migrations_from(&mut self, version: u32) -> Vec<&mut Box<Migration>> {
fn migrations_from(&mut self, version: u32) -> Vec<&mut Box<dyn Migration>> {
self.migrations.iter_mut().filter(|m| m.version() > version).collect()
}
}
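`dyn` also lands in collections and bounds: `Vec<Box<Migration>>` becomes `Vec<Box<dyn Migration>>`, and the `Into<Box<error::Error + Send + Sync>>` bound gains `dyn` too. A sketch of both shapes; `Migration`, `AddColumn`, and `Manager` here are stand-ins mirroring the diff, not the real crate items:

use std::{error, io};

// Stand-in trait; the real `Migration` comes from the migration crate.
trait Migration {
    fn version(&self) -> u32;
}

struct AddColumn(u32);
impl Migration for AddColumn {
    fn version(&self) -> u32 { self.0 }
}

struct Manager {
    migrations: Vec<Box<dyn Migration>>,
}

impl Manager {
    // Same shape as the diff: a Vec of mutable borrows of boxed trait objects.
    fn migrations_from(&mut self, version: u32) -> Vec<&mut Box<dyn Migration>> {
        self.migrations.iter_mut().filter(|m| m.version() > version).collect()
    }
}

// `dyn` also appears inside trait bounds, mirroring `other_io_err` above.
fn other_io_err<E>(e: E) -> io::Error where E: Into<Box<dyn error::Error + Send + Sync>> {
    io::Error::new(io::ErrorKind::Other, e)
}

fn main() {
    let mut manager = Manager {
        migrations: vec![Box::new(AddColumn(1)) as Box<dyn Migration>, Box::new(AddColumn(3))],
    };
    println!("pending: {}", manager.migrations_from(2).len());
    println!("{}", other_io_err("disk full"));
}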
@ -97,12 +97,12 @@ impl SocketAddrExt for Ipv4Addr {
self.is_multicast() ||
self.is_shared_space() ||
self.is_special_purpose() ||
self.is_benchmarking() ||
SocketAddrExt::is_benchmarking(self) ||
self.is_future_use()
}

fn is_usable_public(&self) -> bool {
!self.is_reserved() &&
!SocketAddrExt::is_reserved(self) &&
!self.is_private()
}

@ -186,7 +186,7 @@ impl SocketAddrExt for IpAddr {

fn is_reserved(&self) -> bool {
match *self {
IpAddr::V4(ref ip) => ip.is_reserved(),
IpAddr::V4(ref ip) => SocketAddrExt::is_reserved(ip),
IpAddr::V6(ref ip) => ip.is_reserved(),
}
}

@ -290,7 +290,7 @@ pub fn select_public_address(port: u16) -> SocketAddr {
//prefer IPV4 bindings
for addr in &list { //TODO: use better criteria than just the first in the list
match addr {
IpAddr::V4(a) if !a.is_reserved() => {
IpAddr::V4(a) if !SocketAddrExt::is_reserved(a) => {
return SocketAddr::V4(SocketAddrV4::new(*a, port));
},
_ => {},
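The `Ipv4Addr`/`IpAddr` hunks address a different warning: the standard library reserves inherent methods named `is_reserved`/`is_benchmarking` behind the unstable `ip` feature, so calling the local `SocketAddrExt` methods as `self.is_reserved()` likely trips the `unstable_name_collisions` lint, and fully qualified syntax pins the call to the trait. That reading is inferred from the diff; the sketch below uses an invented `AddrExt` trait:

use std::net::Ipv4Addr;

// Invented extension trait; the real one is `SocketAddrExt` in the network crate.
trait AddrExt {
    fn is_reserved(&self) -> bool;
}

impl AddrExt for Ipv4Addr {
    fn is_reserved(&self) -> bool {
        // Toy rule for the sketch only.
        self.octets()[0] >= 240
    }
}

fn main() {
    let ip = Ipv4Addr::new(250, 0, 0, 1);
    // `ip.is_reserved()` may warn because the standard library reserves an inherent
    // `Ipv4Addr::is_reserved`; fully qualified syntax makes the trait call unambiguous.
    println!("reserved: {}", AddrExt::is_reserved(&ip));
}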
@ -145,7 +145,7 @@ impl From<Option<io::Error>> for AddressResolveError {
}

impl error::Error for Error {
fn source(&self) -> Option<&(error::Error + 'static)> {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
Error::Decompression(e) => Some(e),
Error::Rlp(e) => Some(e),

@ -75,7 +75,7 @@ pub enum NetworkIoMessage {
/// Register a new protocol handler.
AddHandler {
/// Handler shared instance.
handler: Arc<NetworkProtocolHandler + Sync>,
handler: Arc<dyn NetworkProtocolHandler + Sync>,
/// Protocol Id.
protocol: ProtocolId,
/// Supported protocol versions and number of packet IDs reserved by the protocol (packet count).

@ -361,15 +361,15 @@ impl<'a, T> NetworkContext for &'a T where T: ?Sized + NetworkContext {
/// `Message` is the type for message data.
pub trait NetworkProtocolHandler: Sync + Send {
/// Initialize the handler
fn initialize(&self, _io: &NetworkContext) {}
fn initialize(&self, _io: &dyn NetworkContext) {}
/// Called when new network packet received.
fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]);
fn read(&self, io: &dyn NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]);
/// Called when new peer is connected. Only called when peer supports the same protocol.
fn connected(&self, io: &NetworkContext, peer: &PeerId);
fn connected(&self, io: &dyn NetworkContext, peer: &PeerId);
/// Called when a previously connected peer disconnects.
fn disconnected(&self, io: &NetworkContext, peer: &PeerId);
fn disconnected(&self, io: &dyn NetworkContext, peer: &PeerId);
/// Timer function called after a timeout created with `NetworkContext::timeout`.
fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) {}
fn timeout(&self, _io: &dyn NetworkContext, _timer: TimerToken) {}
}

/// Non-reserved peer modes.
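References to trait objects in argument position need `dyn` as well, hence `&NetworkContext` becoming `&dyn NetworkContext` throughout the handler trait. A compact sketch with placeholder `Context`/`Handler` traits:

// Placeholder traits; the real ones are `NetworkContext` / `NetworkProtocolHandler`.
trait Context {
    fn send(&self, peer: usize, data: &[u8]);
}

trait Handler {
    // Borrowed trait objects in parameter position also take `dyn`.
    fn read(&self, io: &dyn Context, peer: usize, data: &[u8]);
}

struct LogContext;
impl Context for LogContext {
    fn send(&self, peer: usize, data: &[u8]) {
        println!("send {} bytes to peer {}", data.len(), peer);
    }
}

struct Echo;
impl Handler for Echo {
    fn read(&self, io: &dyn Context, peer: usize, data: &[u8]) {
        io.send(peer, data);
    }
}

fn main() {
    let handler: Box<dyn Handler> = Box::new(Echo);
    handler.read(&LogContext, 7, b"ping");
}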
@ -16,10 +16,9 @@

extern crate futures;
extern crate ethabi;
extern crate ethabi_derive;
extern crate keccak_hash;

#[macro_use]
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
@ -24,25 +24,25 @@ use_contract!(registrar, "res/registrar.json");
// Maps a domain name to an Ethereum address
const DNS_A_RECORD: &'static str = "A";

pub type Asynchronous = Box<Future<Item=Bytes, Error=String> + Send>;
pub type Asynchronous = Box<dyn Future<Item=Bytes, Error=String> + Send>;
pub type Synchronous = Result<Bytes, String>;

/// Registrar is dedicated interface to access the registrar contract
/// which in turn generates an address when a client requests one
pub struct Registrar {
client: Arc<RegistrarClient<Call=Asynchronous>>,
client: Arc<dyn RegistrarClient<Call=Asynchronous>>,
}

impl Registrar {
/// Registrar constructor
pub fn new(client: Arc<RegistrarClient<Call=Asynchronous>>) -> Self {
pub fn new(client: Arc<dyn RegistrarClient<Call=Asynchronous>>) -> Self {
Self {
client: client,
}
}

/// Generate an address for the given key
pub fn get_address<'a>(&self, key: &'a str) -> Box<Future<Item = Address, Error = String> + Send> {
pub fn get_address<'a>(&self, key: &'a str) -> Box<dyn Future<Item = Address, Error = String> + Send> {
// Address of the registrar itself
let registrar_address = match self.client.registrar_address() {
Ok(a) => a,
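Trait objects that bind an associated type follow the same rule, which is how `Arc<RegistrarClient<Call = Asynchronous>>` turns into `Arc<dyn RegistrarClient<Call = Asynchronous>>`. A std-only sketch of that shape; the trait, its associated type, and `StaticRegistry` are illustrative stand-ins:

use std::sync::Arc;

// Stand-in for `RegistrarClient<Call = Asynchronous>`; names are illustrative only.
trait RegistryClient {
    type Call;
    fn call(&self, key: &str) -> Self::Call;
}

struct StaticRegistry;

impl RegistryClient for StaticRegistry {
    type Call = Result<String, String>;
    fn call(&self, key: &str) -> Self::Call {
        Ok(format!("0xaddress-of-{}", key))
    }
}

// A trait object must name its associated type, so `Call` is fixed here.
struct Registrar {
    client: Arc<dyn RegistryClient<Call = Result<String, String>>>,
}

fn main() {
    let registrar = Registrar { client: Arc::new(StaticRegistry) };
    println!("{:?}", registrar.client.call("A"));
}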
@ -40,7 +40,7 @@ pub trait Decompressor {
}

/// Call this function to compress rlp.
pub fn compress(c: &[u8], swapper: &Compressor) -> ElasticArray1024<u8> {
pub fn compress(c: &[u8], swapper: &dyn Compressor) -> ElasticArray1024<u8> {
let rlp = Rlp::new(c);
if rlp.is_data() {
ElasticArray1024::from_slice(swapper.compressed(rlp.as_raw()).unwrap_or_else(|| rlp.as_raw()))

@ -50,7 +50,7 @@ pub fn compress(c: &[u8], swapper: &Compressor) -> ElasticArray1024<u8> {
}

/// Call this function to decompress rlp.
pub fn decompress(c: &[u8], swapper: &Decompressor) -> ElasticArray1024<u8> {
pub fn decompress(c: &[u8], swapper: &dyn Decompressor) -> ElasticArray1024<u8> {
let rlp = Rlp::new(c);
if rlp.is_data() {
ElasticArray1024::from_slice(swapper.decompressed(rlp.as_raw()).unwrap_or_else(|| rlp.as_raw()))