// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::any::Any;
use std::sync::{Arc, Weak};
use std::time::{Duration, Instant};
use std::thread;

use ansi_term::Colour;
use bytes::Bytes;
use ethcore::account_provider::{AccountProvider, AccountProviderSettings};
use ethcore::client::{BlockId, CallContract, Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient, BlockInfo};
use ethcore::ethstore::ethkey;
use ethcore::miner::{stratum, Miner, MinerService, MinerOptions};
use ethcore::snapshot::{self, SnapshotConfiguration};
use ethcore::spec::{SpecParams, OptimizeFor};
use ethcore::verification::queue::VerifierSettings;
use ethcore_logger::{Config as LogConfig, RotatingLogger};
use ethcore_service::ClientService;
use ethereum_types::Address;
use sync::{self, SyncConfig};
use miner::work_notify::WorkPoster;
use futures::IntoFuture;
use hash_fetch::{self, fetch};
use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
use journaldb::Algorithm;
use light::Cache as LightDataCache;
use miner::external::ExternalMiner;
use node_filter::NodeFilter;
use parity_runtime::Runtime;
use parity_rpc::{Origin, Metadata, NetworkSettings, informant, is_major_importing};
use updater::{UpdatePolicy, Updater};
use parity_version::version;
use ethcore_private_tx::{ProviderConfig, EncryptorConfig, SecretStoreEncryptor};
use params::{
	SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
	tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
};
use helpers::{to_client_config, execute_upgrades, passwords_from_files};
use upgrade::upgrade_key_location;
use dir::{Directories, DatabaseDirectories};
use cache::CacheConfig;
use user_defaults::UserDefaults;
use ipfs;
use jsonrpc_core;
use modules;
use registrar::{RegistrarClient, Asynchronous};
use rpc;
use rpc_apis;
use secretstore;
use signer;
use db;
use ethkey::Password;

// how often to take periodic snapshots.
const SNAPSHOT_PERIOD: u64 = 5000;

// how many blocks to wait before starting a periodic snapshot.
const SNAPSHOT_HISTORY: u64 = 100;

// Number of minutes before a given gas price corpus should expire.
// Light client only.
const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6;

// Hint appended to error messages when a password is missing or invalid.
const VERIFY_PASSWORD_HINT: &str = "Make sure a valid password is present in files passed using `--password` or in the configuration file.";

// Full client number of DNS threads
const FETCH_FULL_NUM_DNS_THREADS: usize = 4;

// Light client number of DNS threads
const FETCH_LIGHT_NUM_DNS_THREADS: usize = 1;

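/// Everything needed to run a `parity` node, as assembled from CLI flags and the config file.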
#[derive(Debug, PartialEq)]
pub struct RunCmd {
	pub cache_config: CacheConfig,
	pub dirs: Directories,
	pub spec: SpecType,
	pub pruning: Pruning,
	pub pruning_history: u64,
	pub pruning_memory: usize,
	/// Some if execution should be daemonized. Contains pid_file path.
	pub daemon: Option<String>,
	pub logger_config: LogConfig,
	pub miner_options: MinerOptions,
	pub gas_price_percentile: usize,
	pub poll_lifetime: u32,
	pub ws_conf: rpc::WsConfiguration,
	pub http_conf: rpc::HttpConfiguration,
	pub ipc_conf: rpc::IpcConfiguration,
	pub net_conf: sync::NetworkConfiguration,
	pub network_id: Option<u64>,
	pub warp_sync: bool,
	pub warp_barrier: Option<u64>,
	pub acc_conf: AccountsConfig,
	pub gas_pricer_conf: GasPricerConfig,
	pub miner_extras: MinerExtras,
	pub update_policy: UpdatePolicy,
	pub mode: Option<Mode>,
	pub tracing: Switch,
	pub fat_db: Switch,
	pub compaction: DatabaseCompactionProfile,
	pub vm_type: VMType,
	pub geth_compatibility: bool,
	pub experimental_rpcs: bool,
	pub net_settings: NetworkSettings,
	pub ipfs_conf: ipfs::Configuration,
	pub secretstore_conf: secretstore::Configuration,
	pub private_provider_conf: ProviderConfig,
	pub private_encryptor_conf: EncryptorConfig,
	pub private_tx_enabled: bool,
	pub name: String,
	pub custom_bootnodes: bool,
	pub stratum: Option<stratum::Options>,
	pub snapshot_conf: SnapshotConfiguration,
	pub check_seal: bool,
	pub download_old_blocks: bool,
	pub verifier_settings: VerifierSettings,
	pub serve_light: bool,
	pub light: bool,
	pub no_persistent_txqueue: bool,
	pub whisper: ::whisper::Config,
	pub no_hardcoded_sync: bool,
	pub max_round_blocks_to_import: usize,
	pub on_demand_retry_count: Option<usize>,
	pub on_demand_inactive_time_limit: Option<u64>,
}

// node info fetcher for the local store.
struct FullNodeInfo {
	miner: Option<Arc<Miner>>, // TODO: only TXQ needed, just use that after decoupling.
}

impl ::local_store::NodeInfo for FullNodeInfo {
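	// Gather locally-submitted transactions that are still pending so the local store can persist them.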
	fn pending_transactions(&self) -> Vec<::transaction::PendingTransaction> {
		let miner = match self.miner.as_ref() {
			Some(m) => m,
			None => return Vec::new(),
		};

		miner.local_transactions()
			.values()
			.filter_map(|status| match *status {
				::miner::pool::local_transactions::Status::Pending(ref tx) => Some(tx.pending().clone()),
				_ => None,
			})
			.collect()
	}
}

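/// Concrete light-client type used by the light execution path below.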
type LightClient = ::light::client::Client<::light_helpers::EpochFetch>;

// helper for light execution.
fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<RunningClient, String> {
	use light::client as light_client;
	use sync::{LightSyncParams, LightSync, ManageNetwork};
	use parking_lot::{Mutex, RwLock};

	// load spec
	let spec = cmd.spec.spec(SpecParams::new(cmd.dirs.cache.as_ref(), OptimizeFor::Memory))?;

	// load genesis hash
	let genesis_hash = spec.genesis_header().hash();

	// database paths
	let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone());

	// user defaults path
	let user_defaults_path = db_dirs.user_defaults_path();

	// load user defaults
	let user_defaults = UserDefaults::load(&user_defaults_path)?;

	// select pruning algorithm
	let algorithm = cmd.pruning.to_algorithm(&user_defaults);

	// execute upgrades
	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;

	// create dirs used by parity
	cmd.dirs.create_dirs(cmd.acc_conf.unlocked_accounts.len() == 0, cmd.secretstore_conf.enabled)?;

	// print out running parity environment
	print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs);

	info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client"));

	// TODO: configurable cache size.
	let cache = LightDataCache::new(Default::default(), Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES));
	let cache = Arc::new(Mutex::new(cache));

	// start client and create transaction queue.
	let mut config = light_client::Config {
		queue: Default::default(),
		chain_column: ::ethcore::db::COL_LIGHT_CHAIN,
		verify_full: true,
		check_seal: cmd.check_seal,
		no_hardcoded_sync: cmd.no_hardcoded_sync,
	};

	config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
	config.queue.verifier_settings = cmd.verifier_settings;

	// start on_demand service.
	let on_demand = Arc::new({
		let mut on_demand = ::light::on_demand::OnDemand::new(cache.clone());
		on_demand.default_retry_number(cmd.on_demand_retry_count.unwrap_or(::light::on_demand::DEFAULT_RETRY_COUNT));
		on_demand.query_inactive_time_limit(cmd.on_demand_inactive_time_limit.map(Duration::from_millis)
			.unwrap_or(::light::on_demand::DEFAULT_QUERY_TIME_LIMIT));
		on_demand
	});

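	// The epoch fetcher needs a handle to the sync service, which does not exist yet;
	// the weak handle is filled in once `LightSync` is constructed further down.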
	let sync_handle = Arc::new(RwLock::new(Weak::new()));
	let fetch = ::light_helpers::EpochFetch {
		on_demand: on_demand.clone(),
		sync: sync_handle.clone(),
	};

	// initialize database.
	let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."),
		&cmd.cache_config,
		&cmd.compaction).map_err(|e| format!("Failed to open database {:?}", e))?;

	let service = light_client::Service::start(config, &spec, fetch, db, cache.clone())
		.map_err(|e| format!("Error starting light client: {}", e))?;
	let client = service.client().clone();
	let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default()));
	let provider = ::light::provider::LightProvider::new(client.clone(), txq.clone());

	// start network.
	// set up bootnodes
	let mut net_conf = cmd.net_conf;
	if !cmd.custom_bootnodes {
		net_conf.boot_nodes = spec.nodes.clone();
	}

	let mut attached_protos = Vec::new();
	let whisper_factory = if cmd.whisper.enabled {
		let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos)
			.map_err(|e| format!("Failed to initialize whisper: {}", e))?;
		whisper_factory
	} else {
		None
	};

	// set network path.
	net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());
	let sync_params = LightSyncParams {
		network_config: net_conf.into_basic().map_err(|e| format!("Failed to produce network config: {}", e))?,
		client: Arc::new(provider),
		network_id: cmd.network_id.unwrap_or(spec.network_id()),
		subprotocol_name: sync::LIGHT_PROTOCOL,
		handlers: vec![on_demand.clone()],
		attached_protos: attached_protos,
	};
	let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?;
	let light_sync = Arc::new(light_sync);
	*sync_handle.write() = Arc::downgrade(&light_sync);

	// spin up event loop
	let runtime = Runtime::with_default_thread_count();

	// queue cull service.
	let queue_cull = Arc::new(::light_helpers::QueueCull {
		client: client.clone(),
		sync: light_sync.clone(),
		on_demand: on_demand.clone(),
		txq: txq.clone(),
		executor: runtime.executor(),
	});

	service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?;

	// start the network.
	light_sync.start_network();

	// fetch service
	let fetch = fetch::Client::new(FETCH_LIGHT_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?;
	let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;

	// prepare account provider
	let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?);
	let rpc_stats = Arc::new(informant::RpcStats::default());

	// the signer service
	let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config));

	// start RPCs
	let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies {
		signer_service: signer_service,
		client: client.clone(),
		sync: light_sync.clone(),
		net: light_sync.clone(),
		secret_store: account_provider,
		logger: logger,
		settings: Arc::new(cmd.net_settings),
		on_demand: on_demand,
		cache: cache.clone(),
		transaction_queue: txq,
		ws_address: cmd.ws_conf.address(),
		fetch: fetch,
		geth_compatibility: cmd.geth_compatibility,
		experimental_rpcs: cmd.experimental_rpcs,
		executor: runtime.executor(),
		whisper_rpc: whisper_factory,
		private_tx_service: None, //TODO: add this to client.
		gas_price_percentile: cmd.gas_price_percentile,
		poll_lifetime: cmd.poll_lifetime
	});

	let dependencies = rpc::Dependencies {
		apis: deps_for_rpc_apis.clone(),
		executor: runtime.executor(),
		stats: rpc_stats.clone(),
	};

	// start rpc servers
	let rpc_direct = rpc::setup_apis(rpc_apis::ApiSet::All, &dependencies);
	let ws_server = rpc::new_ws(cmd.ws_conf, &dependencies)?;
	let http_server = rpc::new_http("HTTP JSON-RPC", "jsonrpc", cmd.http_conf.clone(), &dependencies)?;
	let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;

	// the informant
	let informant = Arc::new(Informant::new(
		LightNodeInformantData {
			client: client.clone(),
			sync: light_sync.clone(),
			cache: cache,
		},
		None,
		Some(rpc_stats),
		cmd.logger_config.color,
	));
	service.add_notify(informant.clone());
	service.register_handler(informant.clone()).map_err(|_| "Unable to register informant handler".to_owned())?;

	Ok(RunningClient {
		inner: RunningClientInner::Light {
			rpc: rpc_direct,
			informant,
			client,
			keep_alive: Box::new((runtime, service, ws_server, http_server, ipc_server)),
		}
	})
}

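// helper for full-client execution.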
fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq: Cr,
	on_updater_rq: Rr) -> Result<RunningClient, String>
	where Cr: Fn(String) + 'static + Send,
		Rr: Fn() + 'static + Send
{
	// load spec
	let spec = cmd.spec.spec(&cmd.dirs.cache)?;

	// load genesis hash
	let genesis_hash = spec.genesis_header().hash();

	// database paths
	let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone());

	// user defaults path
	let user_defaults_path = db_dirs.user_defaults_path();

	// load user defaults
	let mut user_defaults = UserDefaults::load(&user_defaults_path)?;

	// select pruning algorithm
	let algorithm = cmd.pruning.to_algorithm(&user_defaults);

	// check if tracing is on
	let tracing = tracing_switch_to_bool(cmd.tracing, &user_defaults)?;

	// check if fatdb is on
	let fat_db = fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)?;

	// get the mode
	let mode = mode_switch_to_bool(cmd.mode, &user_defaults)?;
	trace!(target: "mode", "mode is {:?}", mode);
	let network_enabled = match mode { Mode::Dark(_) | Mode::Off => false, _ => true, };

	// get the update policy
	let update_policy = cmd.update_policy;

	// prepare client and snapshot paths.
	let client_path = db_dirs.client_path(algorithm);
	let snapshot_path = db_dirs.snapshot_path();

	// execute upgrades
	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;

	// create dirs used by parity
	cmd.dirs.create_dirs(cmd.acc_conf.unlocked_accounts.len() == 0, cmd.secretstore_conf.enabled)?;

	// run in daemon mode
	if let Some(pid_file) = cmd.daemon {
		daemonize(pid_file)?;
	}

	// print out running parity environment
	print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs);

	// display info about used pruning algorithm
	info!("State DB configuration: {}{}{}",
		Colour::White.bold().paint(algorithm.as_str()),
		match fat_db {
			true => Colour::White.bold().paint(" +Fat").to_string(),
			false => "".to_owned(),
		},
		match tracing {
			true => Colour::White.bold().paint(" +Trace").to_string(),
			false => "".to_owned(),
		}
	);
	info!("Operating mode: {}", Colour::White.bold().paint(format!("{}", mode)));

	// display warning about using experimental journaldb algorithm
	if !algorithm.is_stable() {
		warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable"));
	}

	// create sync config
	let mut sync_config = SyncConfig::default();
	sync_config.network_id = match cmd.network_id {
		Some(id) => id,
		None => spec.network_id(),
	};
	if spec.subprotocol_name().len() != 3 {
		warn!("Your chain specification's subprotocol length is not 3. Ignoring.");
	} else {
		sync_config.subprotocol_name.clone_from_slice(spec.subprotocol_name().as_bytes());
	}

	sync_config.fork_block = spec.fork_block();
	let mut warp_sync = spec.engine.supports_warp() && cmd.warp_sync;
	if warp_sync {
		// Logging is not initialized yet, so we print directly to stderr
		if fat_db {
			warn!("Warning: Warp Sync is disabled because Fat DB is turned on.");
			warp_sync = false;
		} else if tracing {
			warn!("Warning: Warp Sync is disabled because tracing is turned on.");
			warp_sync = false;
		} else if algorithm != Algorithm::OverlayRecent {
			warn!("Warning: Warp Sync is disabled because of non-default pruning mode.");
			warp_sync = false;
		}
	}
	sync_config.warp_sync = match (warp_sync, cmd.warp_barrier) {
		(true, Some(block)) => sync::WarpSync::OnlyAndAfter(block),
		(true, _) => sync::WarpSync::Enabled,
		_ => sync::WarpSync::Disabled,
	};
	sync_config.download_old_blocks = cmd.download_old_blocks;
	sync_config.serve_light = cmd.serve_light;

	let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;

	// prepare account provider
	let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?);

	// spin up event loop
	let runtime = Runtime::with_default_thread_count();

	// fetch service
	let fetch = fetch::Client::new(FETCH_FULL_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?;

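	// remember the transaction pool limit; the client's verification queue is sized from it below.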
	let txpool_size = cmd.miner_options.pool_limits.max_count;
	// create miner
	let miner = Arc::new(Miner::new(
		cmd.miner_options,
		cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), runtime.executor()),
		&spec,
		Some(account_provider.clone()),
	));
	miner.set_author(cmd.miner_extras.author, None).expect("Fails only if password is Some; password is None; qed");
	miner.set_gas_range_target(cmd.miner_extras.gas_range_target);
	miner.set_extra_data(cmd.miner_extras.extra_data);

	if !cmd.miner_extras.work_notify.is_empty() {
		miner.add_work_listener(Box::new(
			WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), runtime.executor())
		));
	}

	let engine_signer = cmd.miner_extras.engine_signer;
	if engine_signer != Default::default() {
		// Check if engine signer exists
		if !account_provider.has_account(engine_signer) {
			return Err(format!("Consensus signer account not found for the current chain. {}", build_create_account_hint(&cmd.spec, &cmd.dirs.keys)));
		}

		// Check if any passwords have been read from the password file(s)
		if passwords.is_empty() {
			return Err(format!("No password found for the consensus signer {}. {}", engine_signer, VERIFY_PASSWORD_HINT));
		}

		// Attempt to sign in the engine signer.
		if !passwords.iter().any(|p| miner.set_author(engine_signer, Some(p.to_owned())).is_ok()) {
			return Err(format!("No valid password for the consensus signer {}. {}", engine_signer, VERIFY_PASSWORD_HINT));
		}
	}

	// display warning if using --no-hardcoded-sync
	if cmd.no_hardcoded_sync {
		warn!("The --no-hardcoded-sync flag has no effect if you don't use --light");
	}

	// create client config
	let mut client_config = to_client_config(
		&cmd.cache_config,
		spec.name.to_lowercase(),
		mode.clone(),
		tracing,
		fat_db,
		cmd.compaction,
		cmd.vm_type,
		cmd.name,
		algorithm,
		cmd.pruning_history,
		cmd.pruning_memory,
		cmd.check_seal,
		cmd.max_round_blocks_to_import,
	);

	client_config.queue.verifier_settings = cmd.verifier_settings;
	client_config.transaction_verification_queue_size = ::std::cmp::max(2048, txpool_size / 4);
	client_config.snapshot = cmd.snapshot_conf.clone();

	// set up bootnodes
	let mut net_conf = cmd.net_conf;
	if !cmd.custom_bootnodes {
		net_conf.boot_nodes = spec.nodes.clone();
	}

	// set network path.
	net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());

	let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
	let client_db = restoration_db_handler.open(&client_path)
		.map_err(|e| format!("Failed to open database {:?}", e))?;

	// create client service.
	let service = ClientService::start(
		client_config,
		&spec,
		client_db,
		&snapshot_path,
		restoration_db_handler,
		&cmd.dirs.ipc_path(),
		miner.clone(),
		account_provider.clone(),
		Box::new(SecretStoreEncryptor::new(cmd.private_encryptor_conf, fetch.clone()).map_err(|e| e.to_string())?),
		cmd.private_provider_conf,
	).map_err(|e| format!("Client service error: {:?}", e))?;

	let connection_filter_address = spec.params().node_permission_contract;
	// drop the spec to free up genesis state.
	drop(spec);

	// take handle to client
	let client = service.client();
	// Update the miner's block gas limit
	miner.update_transaction_queue_limits(*client.best_block_header().gas_limit());

	// take handle to private transactions service
	let private_tx_service = service.private_tx_service();
	let private_tx_provider = private_tx_service.provider();
	let connection_filter = connection_filter_address.map(|a| Arc::new(NodeFilter::new(Arc::downgrade(&client) as Weak<BlockChainClient>, a)));
	let snapshot_service = service.snapshot_service();

	// initialize the local node information store.
	let store = {
		let db = service.db();
		let node_info = FullNodeInfo {
			miner: match cmd.no_persistent_txqueue {
				true => None,
				false => Some(miner.clone()),
			}
		};

		let store = ::local_store::create(db.key_value().clone(), ::ethcore::db::COL_NODE_INFO, node_info);

		if cmd.no_persistent_txqueue {
			info!("Running without a persistent transaction queue.");

			if let Err(e) = store.clear() {
				warn!("Error clearing persistent transaction queue: {}", e);
			}
		}

		// re-queue pending transactions.
		match store.pending_transactions() {
			Ok(pending) => {
				for pending_tx in pending {
					if let Err(e) = miner.import_own_transaction(&*client, pending_tx) {
						warn!("Error importing saved transaction: {}", e)
					}
				}
			}
			Err(e) => warn!("Error loading cached pending transactions from disk: {}", e),
		}

		Arc::new(store)
	};

	// register it as an IO service to update periodically.
	service.register_io_handler(store).map_err(|_| "Unable to register local store handler".to_owned())?;

	// create external miner
	let external_miner = Arc::new(ExternalMiner::default());

	// start stratum
	if let Some(ref stratum_config) = cmd.stratum {
		stratum::Stratum::register(stratum_config, miner.clone(), Arc::downgrade(&client))
			.map_err(|e| format!("Stratum start error: {:?}", e))?;
	}

	let mut attached_protos = Vec::new();

	let whisper_factory = if cmd.whisper.enabled {
		let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos)
			.map_err(|e| format!("Failed to initialize whisper: {}", e))?;

		whisper_factory
	} else {
		None
	};

	// create sync object
	let (sync_provider, manage_network, chain_notify) = modules::sync(
		sync_config,
		net_conf.clone().into(),
		client.clone(),
		snapshot_service.clone(),
		private_tx_service.clone(),
		client.clone(),
		&cmd.logger_config,
		attached_protos,
		connection_filter.clone().map(|f| f as Arc<::sync::ConnectionFilter + 'static>),
	).map_err(|e| format!("Sync error: {}", e))?;

	service.add_notify(chain_notify.clone());

	// provider not added to a notification center is effectively disabled
	// TODO [debris] refactor it later on
	if cmd.private_tx_enabled {
		service.add_notify(private_tx_provider.clone());
		// TODO [ToDr] PrivateTX should use separate notifications
		// re-using ChainNotify for this is a bit abusive.
		private_tx_provider.add_notify(chain_notify.clone());
	}

	// start network
	if network_enabled {
		chain_notify.start();
	}

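	// registrar client backed by the full client; the updater below uses it to resolve on-chain names.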
	let contract_client = {
		struct FullRegistrar { client: Arc<Client> }
		impl RegistrarClient for FullRegistrar {
			type Call = Asynchronous;
			fn registrar_address(&self) -> Result<Address, String> {
				self.client.registrar_address()
					.ok_or_else(|| "Registrar not defined.".into())
			}
			fn call_contract(&self, address: Address, data: Bytes) -> Self::Call {
				Box::new(self.client.call_contract(BlockId::Latest, address, data).into_future())
			}
		}

		Arc::new(FullRegistrar { client: client.clone() })
	};

	// the updater service
	let updater_fetch = fetch.clone();
	let updater = Updater::new(
		&Arc::downgrade(&(service.client() as Arc<BlockChainClient>)),
		&Arc::downgrade(&sync_provider),
		update_policy,
		hash_fetch::Client::with_fetch(contract_client.clone(), updater_fetch, runtime.executor())
	);
	service.add_notify(updater.clone());

	// set up dependencies for rpc servers
	let rpc_stats = Arc::new(informant::RpcStats::default());
	let secret_store = account_provider.clone();
	let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config));

	let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies {
		signer_service: signer_service,
		snapshot: snapshot_service.clone(),
		client: client.clone(),
		sync: sync_provider.clone(),
		net: manage_network.clone(),
		secret_store: secret_store,
		miner: miner.clone(),
		external_miner: external_miner.clone(),
		logger: logger.clone(),
		settings: Arc::new(cmd.net_settings.clone()),
		net_service: manage_network.clone(),
		updater: updater.clone(),
		geth_compatibility: cmd.geth_compatibility,
		experimental_rpcs: cmd.experimental_rpcs,
		ws_address: cmd.ws_conf.address(),
		fetch: fetch.clone(),
		executor: runtime.executor(),
		whisper_rpc: whisper_factory,
		private_tx_service: Some(private_tx_service.clone()),
		gas_price_percentile: cmd.gas_price_percentile,
		poll_lifetime: cmd.poll_lifetime,
	});

	let dependencies = rpc::Dependencies {
		apis: deps_for_rpc_apis.clone(),
		executor: runtime.executor(),
		stats: rpc_stats.clone(),
	};

	// start rpc servers
	let rpc_direct = rpc::setup_apis(rpc_apis::ApiSet::All, &dependencies);
	let ws_server = rpc::new_ws(cmd.ws_conf.clone(), &dependencies)?;
	let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;
	let http_server = rpc::new_http("HTTP JSON-RPC", "jsonrpc", cmd.http_conf.clone(), &dependencies)?;

	// secret store key server
	let secretstore_deps = secretstore::Dependencies {
		client: client.clone(),
		sync: sync_provider.clone(),
		miner: miner,
		account_provider: account_provider,
		accounts_passwords: &passwords,
	};
	let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps)?;

	// the ipfs server
	let ipfs_server = ipfs::start_server(cmd.ipfs_conf.clone(), client.clone())?;

	// the informant
	let informant = Arc::new(Informant::new(
		FullNodeInformantData {
			client: service.client(),
			sync: Some(sync_provider.clone()),
			net: Some(manage_network.clone()),
		},
		Some(snapshot_service.clone()),
		Some(rpc_stats.clone()),
		cmd.logger_config.color,
	));
	service.add_notify(informant.clone());
	service.register_io_handler(informant.clone()).map_err(|_| "Unable to register informant handler".to_owned())?;

	// save user defaults
	user_defaults.is_first_launch = false;
	user_defaults.pruning = algorithm;
	user_defaults.tracing = tracing;
	user_defaults.fat_db = fat_db;
	user_defaults.set_mode(mode);
	user_defaults.save(&user_defaults_path)?;

	// tell client how to save the default mode if it gets changed.
	client.on_user_defaults_change(move |mode: Option<Mode>| {
		if let Some(mode) = mode {
			user_defaults.set_mode(mode);
		}
		let _ = user_defaults.save(&user_defaults_path); // discard failures - there's nothing we can do
	});

	// the watcher must be kept alive.
	let watcher = match cmd.snapshot_conf.no_periodic {
		true => None,
		false => {
			let sync = sync_provider.clone();
			let client = client.clone();
			let watcher = Arc::new(snapshot::Watcher::new(
				service.client(),
				move || is_major_importing(Some(sync.status().state), client.queue_info()),
				service.io().channel(),
				SNAPSHOT_PERIOD,
				SNAPSHOT_HISTORY,
			));

			service.add_notify(watcher.clone());
			Some(watcher)
		},
	};

	client.set_exit_handler(on_client_rq);
	updater.set_exit_handler(on_updater_rq);

	Ok(RunningClient {
		inner: RunningClientInner::Full {
			rpc: rpc_direct,
			informant,
			client,
			client_service: Arc::new(service),
			keep_alive: Box::new((watcher, updater, ws_server, http_server, ipc_server, secretstore_key_server, ipfs_server, runtime)),
		}
	})
}

/// Parity client currently executing in background threads.
///
/// Should be destroyed by calling `shutdown()`, otherwise execution will continue in the
/// background.
pub struct RunningClient {
	inner: RunningClientInner,
}

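/// The light-node or full-node internals kept alive by a `RunningClient`.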
enum RunningClientInner {
	Light {
		rpc: jsonrpc_core::MetaIoHandler<Metadata, informant::Middleware<rpc_apis::LightClientNotifier>>,
		informant: Arc<Informant<LightNodeInformantData>>,
		client: Arc<LightClient>,
		keep_alive: Box<Any>,
	},
	Full {
		rpc: jsonrpc_core::MetaIoHandler<Metadata, informant::Middleware<informant::ClientNotifier>>,
		informant: Arc<Informant<FullNodeInformantData>>,
		client: Arc<Client>,
		client_service: Arc<ClientService>,
		keep_alive: Box<Any>,
	},
}

impl RunningClient {
	/// Performs a synchronous RPC query.
	/// Blocks execution until the result is ready.
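	///
	/// For example, a hypothetical caller could ask for the current block number with
	/// `running.rpc_query_sync(r#"{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}"#)`.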
	pub fn rpc_query_sync(&self, request: &str) -> Option<String> {
		let metadata = Metadata {
			origin: Origin::CApi,
			session: None,
		};

		match self.inner {
			RunningClientInner::Light { ref rpc, .. } => {
				rpc.handle_request_sync(request, metadata)
			},
			RunningClientInner::Full { ref rpc, .. } => {
				rpc.handle_request_sync(request, metadata)
			},
		}
	}

	/// Shuts down the client.
	pub fn shutdown(self) {
		match self.inner {
			RunningClientInner::Light { rpc, informant, client, keep_alive } => {
				// Create a weak reference to the client so that we can wait on shutdown
				// until it is dropped
				let weak_client = Arc::downgrade(&client);
				drop(rpc);
				drop(keep_alive);
				informant.shutdown();
				drop(informant);
				drop(client);
				wait_for_drop(weak_client);
			},
			RunningClientInner::Full { rpc, informant, client, client_service, keep_alive } => {
				info!("Finishing work, please wait...");
				// Create a weak reference to the client so that we can wait on shutdown
				// until it is dropped
				let weak_client = Arc::downgrade(&client);
				// Shut down and drop the ClientService
				client_service.shutdown();
				drop(client_service);
				// drop this stuff as soon as exit detected.
				drop(rpc);
				drop(keep_alive);
				// to make sure timer does not spawn requests while shutdown is in progress
				informant.shutdown();
				// just Arc is dropping here, to allow other reference release in its default time
				drop(informant);
				drop(client);
				wait_for_drop(weak_client);
			}
		}
	}
}

/// Executes the given run command.
///
/// `on_client_rq` is the action to perform when the client receives an RPC request to be restarted
/// with a different chain.
///
/// `on_updater_rq` is the action to perform when the updater has a new binary to execute.
///
/// On error, returns what to print on stderr.
pub fn execute<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>,
	on_client_rq: Cr, on_updater_rq: Rr) -> Result<RunningClient, String>
	where Cr: Fn(String) + 'static + Send,
		Rr: Fn() + 'static + Send
{
	if cmd.light {
		execute_light_impl(cmd, logger)
	} else {
		execute_impl(cmd, logger, on_client_rq, on_updater_rq)
	}
}

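// Daemonize the current process, writing its pid to `pid_file` (non-Windows builds only).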
#[cfg(not(windows))]
fn daemonize(pid_file: String) -> Result<(), String> {
	extern crate daemonize;

	daemonize::Daemonize::new()
		.pid_file(pid_file)
		.chown_pid_file(true)
		.start()
		.map(|_| ())
		.map_err(|e| format!("Couldn't daemonize; {}", e))
}

#[cfg(windows)]
fn daemonize(_pid_file: String) -> Result<(), String> {
	Err("daemon is not supported on Windows".into())
}

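// Log the version, keys path and database path at startup.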
fn print_running_environment(data_dir: &str, dirs: &Directories, db_dirs: &DatabaseDirectories) {
	info!("Starting {}", Colour::White.bold().paint(version()));
	info!("Keys path {}", Colour::White.bold().paint(dirs.keys_path(data_dir).to_string_lossy().into_owned()));
	info!("DB path {}", Colour::White.bold().paint(db_dirs.db_root_path().to_string_lossy().into_owned()));
}

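// Open (and migrate, if needed) the on-disk key store and unlock any accounts requested via the configuration.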
fn prepare_account_provider(spec: &SpecType, dirs: &Directories, data_dir: &str, cfg: AccountsConfig, passwords: &[Password]) -> Result<AccountProvider, String> {
	use ethcore::ethstore::EthStore;
	use ethcore::ethstore::accounts_dir::RootDiskDirectory;

	let path = dirs.keys_path(data_dir);
	upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path);
	let dir = Box::new(RootDiskDirectory::create(&path).map_err(|e| format!("Could not open keys directory: {}", e))?);
	let account_settings = AccountProviderSettings {
		enable_hardware_wallets: cfg.enable_hardware_wallets,
		hardware_wallet_classic_key: spec == &SpecType::Classic,
		unlock_keep_secret: cfg.enable_fast_unlock,
		blacklisted_accounts: match *spec {
			SpecType::Morden | SpecType::Ropsten | SpecType::Kovan | SpecType::Sokol | SpecType::Dev => vec![],
			_ => vec![
				"00a329c0648769a73afac7f9381e08fb43dbea72".into()
			],
		},
	};

	let ethstore = EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e))?;
	if cfg.refresh_time > 0 {
		ethstore.set_refresh_time(::std::time::Duration::from_secs(cfg.refresh_time));
	}
	let account_provider = AccountProvider::new(
		Box::new(ethstore),
		account_settings,
	);

	// Add development account if running dev chain:
	if let SpecType::Dev = *spec {
		insert_dev_account(&account_provider);
	}

	for a in cfg.unlocked_accounts {
		// Check if the account exists
		if !account_provider.has_account(a) {
			return Err(format!("Account {} not found for the current chain. {}", a, build_create_account_hint(spec, &dirs.keys)));
		}

		// Check if any passwords have been read from the password file(s)
		if passwords.is_empty() {
			return Err(format!("No password found to unlock account {}. {}", a, VERIFY_PASSWORD_HINT));
		}

		if !passwords.iter().any(|p| account_provider.unlock_account_permanently(a, (*p).clone()).is_ok()) {
			return Err(format!("No valid password to unlock account {}. {}", a, VERIFY_PASSWORD_HINT));
		}
	}

	Ok(account_provider)
}

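// Insert the well-known development account (empty password) used by the Dev chain.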
fn insert_dev_account(account_provider: &AccountProvider) {
	let secret: ethkey::Secret = "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7".into();
	let dev_account = ethkey::KeyPair::from_secret(secret.clone()).expect("Valid secret produces valid key; qed");
	if !account_provider.has_account(dev_account.address()) {
		match account_provider.insert_account(secret, &Password::from(String::new())) {
			Err(e) => warn!("Unable to add development account: {}", e),
			Ok(address) => {
				let _ = account_provider.set_account_name(address.clone(), "Development Account".into());
				let _ = account_provider.set_account_meta(address, ::serde_json::to_string(&(vec![
					("description", "Never use this account outside of development chain!"),
					("passwordHint", "Password is empty string"),
				].into_iter().collect::<::std::collections::HashMap<_,_>>())).expect("Serialization of hashmap does not fail."));
			},
		}
	}
}

// Construct an error `String` with an adaptive hint on how to create an account.
fn build_create_account_hint(spec: &SpecType, keys: &str) -> String {
	format!("You can create an account via RPC, UI or `parity account new --chain {} --keys-path {}`.", spec, keys)
}

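// Block until the weak reference can no longer be upgraded, warning after a minute and giving up after five.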
fn wait_for_drop<T>(w: Weak<T>) {
	let sleep_duration = Duration::from_secs(1);
	let warn_timeout = Duration::from_secs(60);
	let max_timeout = Duration::from_secs(300);

	let instant = Instant::now();
	let mut warned = false;

	while instant.elapsed() < max_timeout {
		if w.upgrade().is_none() {
			return;
		}

		if !warned && instant.elapsed() > warn_timeout {
			warned = true;
			warn!("Shutdown is taking longer than expected.");
		}

		thread::sleep(sleep_duration);
	}

	warn!("Shutdown timeout reached, exiting uncleanly.");
}