cli overhaul (#1600)
* cli commands * cleanup parity/signer * cleanup parity/signer * remove redundant import of signer crate from main.rs * cli cleanup in progress * cli cleanup in progress * moved few commonly used functions to separate methods with tests * cleaning up blockchain import in progress * cleaning up blockchain import in progress2 * cleaning up blockchain import in progress3 * tests for database compaction profile parsing * cleaning up blockchain import in progress4 * cleaning up blockchain import in progress5 * blockchain import * export blockchain in progress * cleanup execute_export * Configuration::to_duration cleaned up * removed unused code, tests for to_duration * cleanup Configuration::mode function * parsing some of the cli params in params.rs * rpc and signer are no longer optional * move importing extern crates to main.rs file * swipe dies from rpc module * swipe dies from dapps * finding deprecated * several tests and fixes for parity * parity cleanup in progress * cleanup price parsing * parity cleanup in progress * swiped all dies * parity cleanup in progress * replace usages of from_str with parse() in parity/params.rs * removed few more from_str * split parity/params.rs into params and helpers * removed wildcard import from configuration.rs * cleanup directories/path creation * cleaning up run cmd * moved LoggerConfig * defaults for cli params * fixed indention in raise_fd_limit * tests for rpc_apis * tests for default ipc and rpc settings * ipc socket * cleanup in progress * account service * cleanup miner config * BlockChain commands use Directiores structure now * client_config * network settings and dapps configuration * removing warnings * default logger config * fixed client_path * overhaul * fixing export && import * default export DataFormat * import and export also upgrade db * fixed export && import * polishing pr * polishing pr * fixed custom bootnodes * fixed daemonize on windows * fixed setting up enable network * finished pr * fixed compiling 
on windows * Fixed warning; windows build * Better cache management * Fixed tests on windows * Fixed test * Restored pruning method names * --cache alias * Fixed more tests * Ensure default options actually listed as valid [ci:skip]
This commit is contained in:
parent
435ba186f8
commit
226fe8e0bb
@ -31,10 +31,10 @@ install:
|
|||||||
build: off
|
build: off
|
||||||
|
|
||||||
test_script:
|
test_script:
|
||||||
- cargo test --verbose --release --no-default-features
|
- cargo test --verbose --release
|
||||||
|
|
||||||
after_test:
|
after_test:
|
||||||
- cargo build --verbose --release --no-default-features
|
- cargo build --verbose --release
|
||||||
- ps: if($env:cert) { Start-FileDownload $env:cert -FileName $env:keyfile }
|
- ps: if($env:cert) { Start-FileDownload $env:cert -FileName $env:keyfile }
|
||||||
- ps: if($env:cert) { signtool sign /f $env:keyfile /p $env:certpass target\release\parity.exe }
|
- ps: if($env:cert) { signtool sign /f $env:keyfile /p $env:certpass target\release\parity.exe }
|
||||||
- makensis.exe nsis\installer.nsi
|
- makensis.exe nsis\installer.nsi
|
||||||
|
@ -36,7 +36,7 @@ const MIN_MEM_LIMIT: usize = 16384;
|
|||||||
const MIN_QUEUE_LIMIT: usize = 512;
|
const MIN_QUEUE_LIMIT: usize = 512;
|
||||||
|
|
||||||
/// Block queue configuration
|
/// Block queue configuration
|
||||||
#[derive(Debug)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub struct BlockQueueConfig {
|
pub struct BlockQueueConfig {
|
||||||
/// Maximum number of blocks to keep in unverified queue.
|
/// Maximum number of blocks to keep in unverified queue.
|
||||||
/// When the limit is reached, is_full returns true.
|
/// When the limit is reached, is_full returns true.
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
//! Blockchain configuration.
|
//! Blockchain configuration.
|
||||||
|
|
||||||
/// Blockchain configuration.
|
/// Blockchain configuration.
|
||||||
#[derive(Debug)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
/// Preferred cache size in bytes.
|
/// Preferred cache size in bytes.
|
||||||
pub pref_cache_size: usize,
|
pub pref_cache_size: usize,
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
use std::collections::{HashSet, HashMap, VecDeque};
|
use std::collections::{HashSet, HashMap, VecDeque};
|
||||||
use std::ops::Deref;
|
use std::ops::Deref;
|
||||||
use std::sync::{Arc, Weak};
|
use std::sync::{Arc, Weak};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path};
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
|
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
|
||||||
use std::time::{Instant};
|
use std::time::{Instant};
|
||||||
@ -141,26 +141,10 @@ pub struct Client {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const HISTORY: u64 = 1200;
|
const HISTORY: u64 = 1200;
|
||||||
// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING.
|
|
||||||
// Altering it will force a blanket DB update for *all* JournalDB-derived
|
|
||||||
// databases.
|
|
||||||
// Instead, add/upgrade the version string of the individual JournalDB-derived database
|
|
||||||
// of which you actually want force an upgrade.
|
|
||||||
const CLIENT_DB_VER_STR: &'static str = "5.3";
|
|
||||||
|
|
||||||
/// Get the path for the databases given the root path and information on the databases.
|
|
||||||
pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
|
|
||||||
let mut dir = path.to_path_buf();
|
|
||||||
dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
|
|
||||||
//TODO: sec/fat: pruned/full versioning
|
|
||||||
// version here is a bit useless now, since it's controlled only be the pruning algo.
|
|
||||||
dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning));
|
|
||||||
dir
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Append a path element to the given path and return the string.
|
/// Append a path element to the given path and return the string.
|
||||||
pub fn append_path(path: &Path, item: &str) -> String {
|
pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
|
||||||
let mut p = path.to_path_buf();
|
let mut p = path.as_ref().to_path_buf();
|
||||||
p.push(item);
|
p.push(item);
|
||||||
p.to_str().unwrap().to_owned()
|
p.to_str().unwrap().to_owned()
|
||||||
}
|
}
|
||||||
@ -174,7 +158,7 @@ impl Client {
|
|||||||
miner: Arc<Miner>,
|
miner: Arc<Miner>,
|
||||||
message_channel: IoChannel<ClientIoMessage>,
|
message_channel: IoChannel<ClientIoMessage>,
|
||||||
) -> Result<Arc<Client>, ClientError> {
|
) -> Result<Arc<Client>, ClientError> {
|
||||||
let path = get_db_path(path, config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref());
|
let path = path.to_path_buf();
|
||||||
let gb = spec.genesis_block();
|
let gb = spec.genesis_block();
|
||||||
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
|
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
|
||||||
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
|
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
|
||||||
|
@ -14,6 +14,7 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::str::FromStr;
|
||||||
pub use std::time::Duration;
|
pub use std::time::Duration;
|
||||||
pub use block_queue::BlockQueueConfig;
|
pub use block_queue::BlockQueueConfig;
|
||||||
pub use blockchain::Config as BlockChainConfig;
|
pub use blockchain::Config as BlockChainConfig;
|
||||||
@ -33,7 +34,21 @@ pub enum DatabaseCompactionProfile {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Default for DatabaseCompactionProfile {
|
impl Default for DatabaseCompactionProfile {
|
||||||
fn default() -> Self { DatabaseCompactionProfile::Default }
|
fn default() -> Self {
|
||||||
|
DatabaseCompactionProfile::Default
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for DatabaseCompactionProfile {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"ssd" | "default" => Ok(DatabaseCompactionProfile::Default),
|
||||||
|
"hdd" => Ok(DatabaseCompactionProfile::HDD),
|
||||||
|
_ => Err(format!("Invalid compaction profile given. Expected hdd/ssd (default).")),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Operating mode for the client.
|
/// Operating mode for the client.
|
||||||
@ -50,11 +65,13 @@ pub enum Mode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Default for Mode {
|
impl Default for Mode {
|
||||||
fn default() -> Self { Mode::Active }
|
fn default() -> Self {
|
||||||
|
Mode::Active
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Client configuration. Includes configs for all sub-systems.
|
/// Client configuration. Includes configs for all sub-systems.
|
||||||
#[derive(Debug, Default)]
|
#[derive(Debug, PartialEq, Default)]
|
||||||
pub struct ClientConfig {
|
pub struct ClientConfig {
|
||||||
/// Block queue configuration.
|
/// Block queue configuration.
|
||||||
pub queue: BlockQueueConfig,
|
pub queue: BlockQueueConfig,
|
||||||
@ -79,3 +96,25 @@ pub struct ClientConfig {
|
|||||||
/// Type of block verifier used by client.
|
/// Type of block verifier used by client.
|
||||||
pub verifier_type: VerifierType,
|
pub verifier_type: VerifierType,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::{DatabaseCompactionProfile, Mode};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_default_compaction_profile() {
|
||||||
|
assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Default);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parsing_compaction_profile() {
|
||||||
|
assert_eq!(DatabaseCompactionProfile::Default, "ssd".parse().unwrap());
|
||||||
|
assert_eq!(DatabaseCompactionProfile::Default, "default".parse().unwrap());
|
||||||
|
assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_mode_default() {
|
||||||
|
assert_eq!(Mode::default(), Mode::Active);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -21,7 +21,7 @@ use std::fmt;
|
|||||||
use evm::Evm;
|
use evm::Evm;
|
||||||
use util::{U256, Uint};
|
use util::{U256, Uint};
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
/// Type of EVM to use.
|
/// Type of EVM to use.
|
||||||
pub enum VMType {
|
pub enum VMType {
|
||||||
/// JIT EVM
|
/// JIT EVM
|
||||||
|
@ -15,7 +15,7 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use ethjson;
|
use ethjson;
|
||||||
use util::{H256, MemoryDB, TrieMut, TrieSpec, TrieFactory};
|
use util::{H256, MemoryDB, TrieSpec, TrieFactory};
|
||||||
|
|
||||||
fn test_trie(json: &[u8], trie: TrieSpec) -> Vec<String> {
|
fn test_trie(json: &[u8], trie: TrieSpec) -> Vec<String> {
|
||||||
let tests = ethjson::trie::Test::load(json).unwrap();
|
let tests = ethjson::trie::Test::load(json).unwrap();
|
||||||
|
@ -36,7 +36,7 @@ use client::TransactionImportResult;
|
|||||||
use miner::price_info::PriceInfo;
|
use miner::price_info::PriceInfo;
|
||||||
|
|
||||||
/// Different possible definitions for pending transaction set.
|
/// Different possible definitions for pending transaction set.
|
||||||
#[derive(Debug)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub enum PendingSet {
|
pub enum PendingSet {
|
||||||
/// Always just the transactions in the queue. These have had only cheap checks.
|
/// Always just the transactions in the queue. These have had only cheap checks.
|
||||||
AlwaysQueue,
|
AlwaysQueue,
|
||||||
@ -48,7 +48,7 @@ pub enum PendingSet {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Configures the behaviour of the miner.
|
/// Configures the behaviour of the miner.
|
||||||
#[derive(Debug)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub struct MinerOptions {
|
pub struct MinerOptions {
|
||||||
/// URLs to notify when there is new work.
|
/// URLs to notify when there is new work.
|
||||||
pub new_work_notify: Vec<String>,
|
pub new_work_notify: Vec<String>,
|
||||||
@ -77,12 +77,12 @@ impl Default for MinerOptions {
|
|||||||
MinerOptions {
|
MinerOptions {
|
||||||
new_work_notify: vec![],
|
new_work_notify: vec![],
|
||||||
force_sealing: false,
|
force_sealing: false,
|
||||||
reseal_on_external_tx: true,
|
reseal_on_external_tx: false,
|
||||||
reseal_on_own_tx: true,
|
reseal_on_own_tx: true,
|
||||||
tx_gas_limit: !U256::zero(),
|
tx_gas_limit: !U256::zero(),
|
||||||
tx_queue_size: 1024,
|
tx_queue_size: 1024,
|
||||||
pending_set: PendingSet::AlwaysQueue,
|
pending_set: PendingSet::AlwaysQueue,
|
||||||
reseal_min_period: Duration::from_secs(0),
|
reseal_min_period: Duration::from_secs(2),
|
||||||
work_queue_size: 20,
|
work_queue_size: 20,
|
||||||
enable_resubmission: true,
|
enable_resubmission: true,
|
||||||
}
|
}
|
||||||
@ -90,6 +90,7 @@ impl Default for MinerOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Options for the dynamic gas price recalibrator.
|
/// Options for the dynamic gas price recalibrator.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
pub struct GasPriceCalibratorOptions {
|
pub struct GasPriceCalibratorOptions {
|
||||||
/// Base transaction price to match against.
|
/// Base transaction price to match against.
|
||||||
pub usd_per_tx: f32,
|
pub usd_per_tx: f32,
|
||||||
@ -98,9 +99,9 @@ pub struct GasPriceCalibratorOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The gas price validator variant for a `GasPricer`.
|
/// The gas price validator variant for a `GasPricer`.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
pub struct GasPriceCalibrator {
|
pub struct GasPriceCalibrator {
|
||||||
options: GasPriceCalibratorOptions,
|
options: GasPriceCalibratorOptions,
|
||||||
|
|
||||||
next_calibration: Instant,
|
next_calibration: Instant,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -128,6 +129,7 @@ impl GasPriceCalibrator {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Struct to look after updating the acceptable gas price of a miner.
|
/// Struct to look after updating the acceptable gas price of a miner.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
pub enum GasPricer {
|
pub enum GasPricer {
|
||||||
/// A fixed gas price in terms of Wei - always the argument given.
|
/// A fixed gas price in terms of Wei - always the argument given.
|
||||||
Fixed(U256),
|
Fixed(U256),
|
||||||
|
@ -15,6 +15,7 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
//! Traces config.
|
//! Traces config.
|
||||||
|
use std::str::FromStr;
|
||||||
use bloomchain::Config as BloomConfig;
|
use bloomchain::Config as BloomConfig;
|
||||||
use trace::Error;
|
use trace::Error;
|
||||||
|
|
||||||
@ -29,6 +30,25 @@ pub enum Switch {
|
|||||||
Auto,
|
Auto,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Default for Switch {
|
||||||
|
fn default() -> Self {
|
||||||
|
Switch::Auto
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for Switch {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"on" => Ok(Switch::On),
|
||||||
|
"off" => Ok(Switch::Off),
|
||||||
|
"auto" => Ok(Switch::Auto),
|
||||||
|
other => Err(format!("Invalid switch value: {}", other))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Switch {
|
impl Switch {
|
||||||
/// Tries to turn old switch to new value.
|
/// Tries to turn old switch to new value.
|
||||||
pub fn turn_to(&self, to: Switch) -> Result<bool, Error> {
|
pub fn turn_to(&self, to: Switch) -> Result<bool, Error> {
|
||||||
@ -41,7 +61,7 @@ impl Switch {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Traces config.
|
/// Traces config.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
/// Indicates if tracing should be enabled or not.
|
/// Indicates if tracing should be enabled or not.
|
||||||
/// If it's None, it will be automatically configured.
|
/// If it's None, it will be automatically configured.
|
||||||
@ -55,7 +75,7 @@ pub struct Config {
|
|||||||
impl Default for Config {
|
impl Default for Config {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Config {
|
Config {
|
||||||
enabled: Switch::Auto,
|
enabled: Switch::default(),
|
||||||
blooms: BloomConfig {
|
blooms: BloomConfig {
|
||||||
levels: 3,
|
levels: 3,
|
||||||
elements_per_index: 16,
|
elements_per_index: 16,
|
||||||
@ -64,3 +84,20 @@ impl Default for Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::Switch;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_switch_parsing() {
|
||||||
|
assert_eq!(Switch::On, "on".parse().unwrap());
|
||||||
|
assert_eq!(Switch::Off, "off".parse().unwrap());
|
||||||
|
assert_eq!(Switch::Auto, "auto".parse().unwrap());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_switch_default() {
|
||||||
|
assert_eq!(Switch::default(), Switch::Auto);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -25,7 +25,7 @@ pub use self::canon_verifier::CanonVerifier;
|
|||||||
pub use self::noop_verifier::NoopVerifier;
|
pub use self::noop_verifier::NoopVerifier;
|
||||||
|
|
||||||
/// Verifier type.
|
/// Verifier type.
|
||||||
#[derive(Debug)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub enum VerifierType {
|
pub enum VerifierType {
|
||||||
/// Verifies block normally.
|
/// Verifies block normally.
|
||||||
Canon,
|
Canon,
|
||||||
|
@ -14,16 +14,18 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use std::{fs, ffi, io};
|
use std::{fs, io};
|
||||||
use std::path::{PathBuf, Path};
|
use std::path::{PathBuf, Path};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use time;
|
use time;
|
||||||
use ethkey::Address;
|
use ethkey::Address;
|
||||||
use {libc, json, SafeAccount, Error};
|
use {json, SafeAccount, Error};
|
||||||
use super::KeyDirectory;
|
use super::KeyDirectory;
|
||||||
|
|
||||||
#[cfg(not(windows))]
|
#[cfg(not(windows))]
|
||||||
fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
|
fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
|
||||||
|
use std::ffi;
|
||||||
|
use libc;
|
||||||
let cstr = ffi::CString::new(file_path.to_str().unwrap()).unwrap();
|
let cstr = ffi::CString::new(file_path.to_str().unwrap()).unwrap();
|
||||||
match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } {
|
match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } {
|
||||||
0 => Ok(()),
|
0 => Ok(()),
|
||||||
@ -32,7 +34,7 @@ fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(windows)]
|
#[cfg(windows)]
|
||||||
fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
|
fn restrict_permissions_to_owner(_file_path: &Path) -> Result<(), i32> {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -36,39 +36,25 @@ use regex::Regex;
|
|||||||
use util::RotatingLogger;
|
use util::RotatingLogger;
|
||||||
use util::log::Colour;
|
use util::log::Colour;
|
||||||
|
|
||||||
pub struct Settings {
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct Config {
|
||||||
|
pub mode: Option<String>,
|
||||||
pub color: bool,
|
pub color: bool,
|
||||||
pub init: Option<String>,
|
|
||||||
pub file: Option<String>,
|
pub file: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Settings {
|
impl Default for Config {
|
||||||
pub fn new() -> Settings {
|
fn default() -> Self {
|
||||||
Settings {
|
Config {
|
||||||
color: true,
|
mode: None,
|
||||||
init: None,
|
color: !cfg!(windows),
|
||||||
file: None,
|
file: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn init(mut self, init: String) -> Settings {
|
|
||||||
self.init = Some(init);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn file(mut self, file: String) -> Settings {
|
|
||||||
self.file = Some(file);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn no_color(mut self) -> Settings {
|
|
||||||
self.color = false;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sets up the logger
|
/// Sets up the logger
|
||||||
pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> {
|
pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
|
||||||
use rlog::*;
|
use rlog::*;
|
||||||
|
|
||||||
let mut levels = String::new();
|
let mut levels = String::new();
|
||||||
@ -84,16 +70,21 @@ pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> {
|
|||||||
builder.parse(lvl);
|
builder.parse(lvl);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(ref s) = settings.init {
|
if let Some(ref s) = config.mode {
|
||||||
levels.push_str(s);
|
levels.push_str(s);
|
||||||
builder.parse(s);
|
builder.parse(s);
|
||||||
}
|
}
|
||||||
|
|
||||||
let isatty = stderr_isatty();
|
let isatty = stderr_isatty();
|
||||||
let enable_color = settings.color && isatty;
|
let enable_color = config.color && isatty;
|
||||||
let logs = Arc::new(RotatingLogger::new(levels));
|
let logs = Arc::new(RotatingLogger::new(levels));
|
||||||
let logger = logs.clone();
|
let logger = logs.clone();
|
||||||
let maybe_file = settings.file.as_ref().map(|f| File::create(f).unwrap_or_else(|_| panic!("Cannot write to log file given: {}", f)));
|
|
||||||
|
let maybe_file = match config.file.as_ref() {
|
||||||
|
Some(f) => Some(try!(File::create(f).map_err(|_| format!("Cannot write to log file given: {}", f)))),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
let format = move |record: &LogRecord| {
|
let format = move |record: &LogRecord| {
|
||||||
let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();
|
let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();
|
||||||
|
|
||||||
@ -123,9 +114,11 @@ pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> {
|
|||||||
|
|
||||||
ret
|
ret
|
||||||
};
|
};
|
||||||
|
|
||||||
builder.format(format);
|
builder.format(format);
|
||||||
builder.init().unwrap();
|
builder.init().unwrap();
|
||||||
logs
|
|
||||||
|
Ok(logs)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn kill_color(s: &str) -> String {
|
fn kill_color(s: &str) -> String {
|
||||||
|
84
parity/account.rs
Normal file
84
parity/account.rs
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use ethcore::ethstore::{EthStore, import_accounts};
|
||||||
|
use ethcore::ethstore::dir::DiskDirectory;
|
||||||
|
use ethcore::account_provider::AccountProvider;
|
||||||
|
use helpers::{password_prompt, password_from_file};
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum AccountCmd {
|
||||||
|
New(NewAccount),
|
||||||
|
List(String),
|
||||||
|
Import(ImportAccounts),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct NewAccount {
|
||||||
|
pub iterations: u32,
|
||||||
|
pub path: String,
|
||||||
|
pub password_file: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct ImportAccounts {
|
||||||
|
pub from: Vec<String>,
|
||||||
|
pub to: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn execute(cmd: AccountCmd) -> Result<String, String> {
|
||||||
|
match cmd {
|
||||||
|
AccountCmd::New(new_cmd) => new(new_cmd),
|
||||||
|
AccountCmd::List(path) => list(path),
|
||||||
|
AccountCmd::Import(import_cmd) => import(import_cmd),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new(n: NewAccount) -> Result<String, String> {
|
||||||
|
let password: String = match n.password_file {
|
||||||
|
Some(file) => try!(password_from_file(file)),
|
||||||
|
None => try!(password_prompt()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let dir = Box::new(DiskDirectory::create(n.path).unwrap());
|
||||||
|
let secret_store = Box::new(EthStore::open_with_iterations(dir, n.iterations).unwrap());
|
||||||
|
let acc_provider = AccountProvider::new(secret_store);
|
||||||
|
let new_account = acc_provider.new_account(&password).unwrap();
|
||||||
|
Ok(format!("{:?}", new_account))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn list(path: String) -> Result<String, String> {
|
||||||
|
let dir = Box::new(DiskDirectory::create(path).unwrap());
|
||||||
|
let secret_store = Box::new(EthStore::open(dir).unwrap());
|
||||||
|
let acc_provider = AccountProvider::new(secret_store);
|
||||||
|
let accounts = acc_provider.accounts();
|
||||||
|
let result = accounts.into_iter()
|
||||||
|
.map(|a| format!("{:?}", a))
|
||||||
|
.collect::<Vec<String>>()
|
||||||
|
.join("\n");
|
||||||
|
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn import(i: ImportAccounts) -> Result<String, String> {
|
||||||
|
let to = DiskDirectory::create(i.to).unwrap();
|
||||||
|
let mut imported = 0;
|
||||||
|
for path in &i.from {
|
||||||
|
let from = DiskDirectory::at(path);
|
||||||
|
imported += try!(import_accounts(&from, &to).map_err(|_| "Importing accounts failed.")).len();
|
||||||
|
}
|
||||||
|
Ok(format!("{}", imported))
|
||||||
|
}
|
284
parity/blockchain.rs
Normal file
284
parity/blockchain.rs
Normal file
@ -0,0 +1,284 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::str::{FromStr, from_utf8};
|
||||||
|
use std::{io, fs};
|
||||||
|
use std::io::{BufReader, BufRead};
|
||||||
|
use std::time::Duration;
|
||||||
|
use std::thread::sleep;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use rustc_serialize::hex::FromHex;
|
||||||
|
use ethcore_logger::{setup_log, Config as LogConfig};
|
||||||
|
use util::panics::{PanicHandler, ForwardPanic};
|
||||||
|
use util::{PayloadInfo, ToPretty};
|
||||||
|
use ethcore::service::ClientService;
|
||||||
|
use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
|
||||||
|
use ethcore::error::ImportError;
|
||||||
|
use ethcore::miner::Miner;
|
||||||
|
use cache::CacheConfig;
|
||||||
|
use informant::Informant;
|
||||||
|
use params::{SpecType, Pruning};
|
||||||
|
use helpers::{to_client_config, execute_upgrades};
|
||||||
|
use dir::Directories;
|
||||||
|
use fdlimit;
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum DataFormat {
|
||||||
|
Hex,
|
||||||
|
Binary,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for DataFormat {
|
||||||
|
fn default() -> Self {
|
||||||
|
DataFormat::Binary
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for DataFormat {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"binary" | "bin" => Ok(DataFormat::Binary),
|
||||||
|
"hex" => Ok(DataFormat::Hex),
|
||||||
|
x => Err(format!("Invalid format: {}", x))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum BlockchainCmd {
|
||||||
|
Import(ImportBlockchain),
|
||||||
|
Export(ExportBlockchain),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct ImportBlockchain {
|
||||||
|
pub spec: SpecType,
|
||||||
|
pub logger_config: LogConfig,
|
||||||
|
pub cache_config: CacheConfig,
|
||||||
|
pub dirs: Directories,
|
||||||
|
pub file_path: Option<String>,
|
||||||
|
pub format: Option<DataFormat>,
|
||||||
|
pub pruning: Pruning,
|
||||||
|
pub compaction: DatabaseCompactionProfile,
|
||||||
|
pub mode: Mode,
|
||||||
|
pub tracing: Switch,
|
||||||
|
pub vm_type: VMType,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct ExportBlockchain {
|
||||||
|
pub spec: SpecType,
|
||||||
|
pub logger_config: LogConfig,
|
||||||
|
pub cache_config: CacheConfig,
|
||||||
|
pub dirs: Directories,
|
||||||
|
pub file_path: Option<String>,
|
||||||
|
pub format: Option<DataFormat>,
|
||||||
|
pub pruning: Pruning,
|
||||||
|
pub compaction: DatabaseCompactionProfile,
|
||||||
|
pub mode: Mode,
|
||||||
|
pub tracing: Switch,
|
||||||
|
pub from_block: BlockID,
|
||||||
|
pub to_block: BlockID,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn execute(cmd: BlockchainCmd) -> Result<String, String> {
|
||||||
|
match cmd {
|
||||||
|
BlockchainCmd::Import(import_cmd) => execute_import(import_cmd),
|
||||||
|
BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Imports blocks from a file or stdin into a freshly started client.
/// Auto-detects binary RLP vs hex input when `cmd.format` is `None` by
/// peeking at the first bytes of the stream. Returns a completion message
/// or an error string.
fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
	// Setup panic handler
	let panic_handler = PanicHandler::new_in_arc();

	// load spec file
	let spec = try!(cmd.spec.spec());

	// load genesis hash
	let genesis_hash = spec.genesis_header().hash();

	// Setup logging
	let _logger = setup_log(&cmd.logger_config);

	fdlimit::raise_fd_limit();

	// select pruning algorithm
	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());

	// prepare client_path
	let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);

	// execute upgrades
	try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm));

	// prepare client config
	let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.vm_type, "".into(), spec.fork_name.as_ref());

	// build client
	// NOTE: `cmd.spec.spec()` is loaded a second time here because `spec`
	// was moved into the service; presumably Spec is not Clone — confirm.
	let service = try!(ClientService::start(
		client_config,
		spec,
		Path::new(&client_path),
		Arc::new(Miner::with_spec(try!(cmd.spec.spec()))),
	).map_err(|e| format!("Client service error: {:?}", e)));

	panic_handler.forward_from(&service);
	let client = service.client();

	// Input source: explicit file if given, otherwise stdin.
	let mut instream: Box<io::Read> = match cmd.file_path {
		Some(f) => Box::new(try!(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f)))),
		None => Box::new(io::stdin()),
	};

	// Number of bytes peeked for format detection; also the prefix already
	// consumed from the stream when auto-detection ran.
	const READAHEAD_BYTES: usize = 8;

	let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
	let mut first_read = 0;

	// If no format was given, sniff it: 0xf9 is an RLP long-list prefix,
	// so treat it as binary; anything else is assumed hex.
	// NOTE(review): assumes the first read returns at least 1 byte when the
	// stream is non-empty; an empty stream leaves first_bytes[0] == 0 (Hex).
	let format = match cmd.format {
		Some(format) => format,
		None => {
			first_read = try!(instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream."));
			match first_bytes[0] {
				0xf9 => DataFormat::Binary,
				_ => DataFormat::Hex,
			}
		}
	};

	let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color);

	// Feeds one encoded block into the client, throttling while the block
	// queue is full and tolerating blocks already present in the chain.
	let do_import = |bytes| {
		while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
		match client.import_block(bytes) {
			Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
				trace!("Skipping block already in chain.");
			}
			Err(e) => {
				return Err(format!("Cannot import block: {:?}", e));
			},
			Ok(_) => {},
		}
		informant.tick();
		Ok(())
	};


	match format {
		DataFormat::Binary => {
			loop {
				// Reuse the sniffed prefix on the first iteration, then read
				// fresh READAHEAD_BYTES chunks to get each block's RLP header.
				let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
				let n = if first_read > 0 {
					first_read
				} else {
					try!(instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream."))
				};
				if n == 0 { break; }
				first_read = 0;
				// Total payload size comes from the RLP header; grow the
				// buffer and read the remainder of the block exactly.
				let s = try!(PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))).total();
				bytes.resize(s, 0);
				try!(instream.read_exact(&mut bytes[READAHEAD_BYTES..]).map_err(|_| "Error reading from the file/stream."));
				try!(do_import(bytes));
			}
		}
		DataFormat::Hex => {
			// One hex-encoded block per line; the sniffed prefix is glued
			// back onto the first line since it was already consumed.
			for line in BufReader::new(instream).lines() {
				let s = try!(line.map_err(|_| "Error reading from the file/stream."));
				let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
				first_read = 0;
				let bytes = try!(s.from_hex().map_err(|_| "Invalid hex in file/stream."));
				try!(do_import(bytes));
			}
		}
	}
	// Drain the verification queue so all imported blocks are committed
	// before reporting success.
	client.flush_queue();

	Ok("Import completed.".into())
}
|
||||||
|
|
||||||
|
fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
|
||||||
|
// Setup panic handler
|
||||||
|
let panic_handler = PanicHandler::new_in_arc();
|
||||||
|
|
||||||
|
let format = cmd.format.unwrap_or_else(Default::default);
|
||||||
|
|
||||||
|
// load spec file
|
||||||
|
let spec = try!(cmd.spec.spec());
|
||||||
|
|
||||||
|
// load genesis hash
|
||||||
|
let genesis_hash = spec.genesis_header().hash();
|
||||||
|
|
||||||
|
// Setup logging
|
||||||
|
let _logger = setup_log(&cmd.logger_config);
|
||||||
|
|
||||||
|
fdlimit::raise_fd_limit();
|
||||||
|
|
||||||
|
// select pruning algorithm
|
||||||
|
let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
|
||||||
|
|
||||||
|
// prepare client_path
|
||||||
|
let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
|
||||||
|
|
||||||
|
// execute upgrades
|
||||||
|
try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm));
|
||||||
|
|
||||||
|
// prepare client config
|
||||||
|
let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, VMType::default(), "".into(), spec.fork_name.as_ref());
|
||||||
|
|
||||||
|
let service = try!(ClientService::start(
|
||||||
|
client_config,
|
||||||
|
spec,
|
||||||
|
Path::new(&client_path),
|
||||||
|
Arc::new(Miner::with_spec(try!(cmd.spec.spec())))
|
||||||
|
).map_err(|e| format!("Client service error: {:?}", e)));
|
||||||
|
|
||||||
|
panic_handler.forward_from(&service);
|
||||||
|
let client = service.client();
|
||||||
|
|
||||||
|
let mut out: Box<io::Write> = match cmd.file_path {
|
||||||
|
Some(f) => Box::new(try!(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f)))),
|
||||||
|
None => Box::new(io::stdout()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found"));
|
||||||
|
let to = try!(client.block_number(cmd.to_block).ok_or("From block could not be found"));
|
||||||
|
|
||||||
|
for i in from..(to + 1) {
|
||||||
|
let b = client.block(BlockID::Number(i)).unwrap();
|
||||||
|
match format {
|
||||||
|
DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
|
||||||
|
DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok("Export completed.".into())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod test {
	use super::DataFormat;

	// DataFormat implements FromStr elsewhere in this file; both "binary"
	// and the short alias "bin" must parse to Binary.
	#[test]
	fn test_data_format_parsing() {
		assert_eq!(DataFormat::Binary, "binary".parse().unwrap());
		assert_eq!(DataFormat::Binary, "bin".parse().unwrap());
		assert_eq!(DataFormat::Hex, "hex".parse().unwrap());
	}
}
|
109
parity/cache.rs
Normal file
109
parity/cache.rs
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::cmp::max;
|
||||||
|
|
||||||
|
// Lower bounds and defaults (all in MB) enforced by the accessors below.
const MIN_BC_CACHE_MB: u32 = 4;
const MIN_DB_CACHE_MB: u32 = 2;
const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50;

/// Configuration for application cache sizes.
/// All values are represented in MB.
#[derive(Debug, PartialEq)]
pub struct CacheConfig {
	/// Size of the database cache, set via `set_block_cache_size_mb`.
	/// NOTE(review): the original comment claimed a 50/25/25 blockchain/
	/// tracing/state split, but `db_state_cache_size` takes 3/4 of this
	/// value and `db_blockchain_cache_size` derives from `blockchain`
	/// instead — confirm the intended split.
	db: u32,
	/// Size of blockchain cache.
	blockchain: u32,
	/// Size of transaction queue cache.
	queue: u32,
}

impl Default for CacheConfig {
	fn default() -> Self {
		CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)
	}
}

impl CacheConfig {
	/// Creates new cache config with given details.
	pub fn new(db: u32, blockchain: u32, queue: u32) -> Self {
		CacheConfig {
			db: db,
			blockchain: blockchain,
			queue: queue,
		}
	}

	/// Creates new cache config with cumulative size equal `total`:
	/// 7/8 of the budget goes to the database, 1/8 to the blockchain cache,
	/// and the queue keeps its default limit.
	pub fn new_with_total_cache_size(total: u32) -> Self {
		CacheConfig::new(total * 7 / 8, total / 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)
	}

	/// Size of db cache for blockchain, floored at `MIN_DB_CACHE_MB`.
	pub fn db_blockchain_cache_size(&self) -> u32 {
		max(MIN_DB_CACHE_MB, self.blockchain / 4)
	}

	/// Size of db cache for state (3/4 of `db`), floored at `MIN_DB_CACHE_MB`.
	pub fn db_state_cache_size(&self) -> u32 {
		max(MIN_DB_CACHE_MB, self.db * 3 / 4)
	}

	/// Size of block queue size limit, floored at the minimum.
	pub fn queue(&self) -> u32 {
		max(self.queue, MIN_BLOCK_QUEUE_SIZE_LIMIT_MB)
	}

	/// Size of the blockchain cache, floored at the minimum.
	pub fn blockchain(&self) -> u32 {
		max(self.blockchain, MIN_BC_CACHE_MB)
	}
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
	use super::CacheConfig;

	// 7/8 of the total budget goes to db, 1/8 to blockchain; queue keeps
	// its default (50 MB).
	#[test]
	fn test_cache_config_constructor() {
		let config = CacheConfig::new_with_total_cache_size(200);
		assert_eq!(config.db, 175);
		assert_eq!(config.blockchain(), 25);
		assert_eq!(config.queue(), 50);
	}

	// db sub-caches: blockchain slice derives from the blockchain budget
	// (400/8/4 = 12), state takes 3/4 of db (350*3/4 = 262).
	#[test]
	fn test_cache_config_db_cache_sizes() {
		let config = CacheConfig::new_with_total_cache_size(400);
		assert_eq!(config.db, 350);
		assert_eq!(config.db_blockchain_cache_size(), 12);
		assert_eq!(config.db_state_cache_size(), 262);
	}

	// Default must stay in sync with the documented 64/8/50 split.
	#[test]
	fn test_cache_config_default() {
		assert_eq!(CacheConfig::default(), CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB));
	}
}
|
@ -15,6 +15,7 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use util::version;
|
use util::version;
|
||||||
|
use docopt::Docopt;
|
||||||
|
|
||||||
pub const USAGE: &'static str = r#"
|
pub const USAGE: &'static str = r#"
|
||||||
Parity. Ethereum Client.
|
Parity. Ethereum Client.
|
||||||
@ -22,6 +23,8 @@ Parity. Ethereum Client.
|
|||||||
Copyright 2015, 2016 Ethcore (UK) Limited
|
Copyright 2015, 2016 Ethcore (UK) Limited
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
|
parity [options]
|
||||||
|
parity ui [options]
|
||||||
parity daemon <pid-file> [options]
|
parity daemon <pid-file> [options]
|
||||||
parity account (new | list ) [options]
|
parity account (new | list ) [options]
|
||||||
parity account import <path>... [options]
|
parity account import <path>... [options]
|
||||||
@ -29,8 +32,6 @@ Usage:
|
|||||||
parity import [ <file> ] [options]
|
parity import [ <file> ] [options]
|
||||||
parity export [ <file> ] [options]
|
parity export [ <file> ] [options]
|
||||||
parity signer new-token [options]
|
parity signer new-token [options]
|
||||||
parity [options]
|
|
||||||
parity ui [options]
|
|
||||||
|
|
||||||
Operating Options:
|
Operating Options:
|
||||||
--mode MODE Set the operating mode. MODE can be one of:
|
--mode MODE Set the operating mode. MODE can be one of:
|
||||||
@ -105,8 +106,8 @@ API and Console Options:
|
|||||||
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC
|
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC
|
||||||
interface. APIS is a comma-delimited list of API
|
interface. APIS is a comma-delimited list of API
|
||||||
name. Possible name are web3, eth, net, personal,
|
names. Possible names are web3, eth, net, personal,
|
||||||
ethcore, ethcore_set, traces.
|
ethcore, ethcore_set, traces, rpc.
|
||||||
[default: web3,eth,net,ethcore,personal,traces].
|
[default: web3,eth,net,ethcore,personal,traces,rpc].
|
||||||
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will
|
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will
|
||||||
validate the Host header sent by the browser, it
|
validate the Host header sent by the browser, it
|
||||||
is additional security against some attack
|
is additional security against some attack
|
||||||
@ -201,18 +202,16 @@ Footprint Options:
|
|||||||
fast - maintain journal overlay. Fast but 50MB used.
|
fast - maintain journal overlay. Fast but 50MB used.
|
||||||
auto - use the method most recently synced or
|
auto - use the method most recently synced or
|
||||||
default to fast if none synced [default: auto].
|
default to fast if none synced [default: auto].
|
||||||
--cache-pref-size BYTES Specify the preferred size of the blockchain cache in
|
--cache-size-db MB Override database cache size [default: 64].
|
||||||
bytes [default: 16384].
|
--cache-size-blocks MB Specify the preferred size of the blockchain cache in
|
||||||
--cache-max-size BYTES Specify the maximum size of the blockchain cache in
|
megabytes [default: 8].
|
||||||
bytes [default: 262144].
|
--cache-size-queue MB Specify the maximum size of memory to use for block
|
||||||
--queue-max-size BYTES Specify the maximum size of memory to use for block
|
queue [default: 50].
|
||||||
queue [default: 52428800].
|
--cache-size MB Set total amount of discretionary memory to use for
|
||||||
--cache MEGABYTES Set total amount of discretionary memory to use for
|
|
||||||
the entire system, overrides other cache and queue
|
the entire system, overrides other cache and queue
|
||||||
options.
|
options.
|
||||||
|
|
||||||
Database Options:
|
Database Options:
|
||||||
--db-cache-size MB Override RocksDB database cache size.
|
|
||||||
--db-compaction TYPE Database compaction type. TYPE may be one of:
|
--db-compaction TYPE Database compaction type. TYPE may be one of:
|
||||||
ssd - suitable for SSDs and fast HDDs;
|
ssd - suitable for SSDs and fast HDDs;
|
||||||
hdd - suitable for slow HDDs [default: ssd].
|
hdd - suitable for slow HDDs [default: ssd].
|
||||||
@ -260,6 +259,7 @@ Legacy Options:
|
|||||||
--basic-tx-usd.
|
--basic-tx-usd.
|
||||||
--etherbase ADDRESS Equivalent to --author ADDRESS.
|
--etherbase ADDRESS Equivalent to --author ADDRESS.
|
||||||
--extradata STRING Equivalent to --extra-data STRING.
|
--extradata STRING Equivalent to --extra-data STRING.
|
||||||
|
--cache MB Equivalent to --cache-size MB.
|
||||||
|
|
||||||
Miscellaneous Options:
|
Miscellaneous Options:
|
||||||
-l --logging LOGGING Specify the logging level. Must conform to the same
|
-l --logging LOGGING Specify the logging level. Must conform to the same
|
||||||
@ -271,7 +271,7 @@ Miscellaneous Options:
|
|||||||
-h --help Show this screen.
|
-h --help Show this screen.
|
||||||
"#;
|
"#;
|
||||||
|
|
||||||
#[derive(Debug, RustcDecodable)]
|
#[derive(Debug, PartialEq, RustcDecodable)]
|
||||||
pub struct Args {
|
pub struct Args {
|
||||||
pub cmd_daemon: bool,
|
pub cmd_daemon: bool,
|
||||||
pub cmd_account: bool,
|
pub cmd_account: bool,
|
||||||
@ -294,7 +294,6 @@ pub struct Args {
|
|||||||
pub flag_identity: String,
|
pub flag_identity: String,
|
||||||
pub flag_unlock: Option<String>,
|
pub flag_unlock: Option<String>,
|
||||||
pub flag_password: Vec<String>,
|
pub flag_password: Vec<String>,
|
||||||
pub flag_cache: Option<usize>,
|
|
||||||
pub flag_keys_path: String,
|
pub flag_keys_path: String,
|
||||||
pub flag_keys_iterations: u32,
|
pub flag_keys_iterations: u32,
|
||||||
pub flag_no_import_keys: bool,
|
pub flag_no_import_keys: bool,
|
||||||
@ -309,9 +308,13 @@ pub struct Args {
|
|||||||
pub flag_node_key: Option<String>,
|
pub flag_node_key: Option<String>,
|
||||||
pub flag_reserved_peers: Option<String>,
|
pub flag_reserved_peers: Option<String>,
|
||||||
pub flag_reserved_only: bool,
|
pub flag_reserved_only: bool,
|
||||||
pub flag_cache_pref_size: usize,
|
|
||||||
pub flag_cache_max_size: usize,
|
pub flag_cache_size_db: u32,
|
||||||
pub flag_queue_max_size: usize,
|
pub flag_cache_size_blocks: u32,
|
||||||
|
pub flag_cache_size_queue: u32,
|
||||||
|
pub flag_cache_size: Option<u32>,
|
||||||
|
pub flag_cache: Option<u32>,
|
||||||
|
|
||||||
pub flag_no_jsonrpc: bool,
|
pub flag_no_jsonrpc: bool,
|
||||||
pub flag_jsonrpc_interface: String,
|
pub flag_jsonrpc_interface: String,
|
||||||
pub flag_jsonrpc_port: u16,
|
pub flag_jsonrpc_port: u16,
|
||||||
@ -380,13 +383,18 @@ pub struct Args {
|
|||||||
pub flag_dapps_off: bool,
|
pub flag_dapps_off: bool,
|
||||||
pub flag_ipcpath: Option<String>,
|
pub flag_ipcpath: Option<String>,
|
||||||
pub flag_ipcapi: Option<String>,
|
pub flag_ipcapi: Option<String>,
|
||||||
pub flag_db_cache_size: Option<usize>,
|
|
||||||
pub flag_db_compaction: String,
|
pub flag_db_compaction: String,
|
||||||
pub flag_fat_db: bool,
|
pub flag_fat_db: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn print_version() {
|
impl Default for Args {
|
||||||
println!("\
|
fn default() -> Self {
|
||||||
|
Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn print_version() -> String {
|
||||||
|
format!("\
|
||||||
Parity
|
Parity
|
||||||
version {}
|
version {}
|
||||||
Copyright 2015, 2016 Ethcore (UK) Limited
|
Copyright 2015, 2016 Ethcore (UK) Limited
|
||||||
@ -395,6 +403,6 @@ This is free software: you are free to change and redistribute it.
|
|||||||
There is NO WARRANTY, to the extent permitted by law.
|
There is NO WARRANTY, to the extent permitted by law.
|
||||||
|
|
||||||
By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
|
By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
|
||||||
", version());
|
", version())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -15,17 +15,17 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::str::FromStr;
|
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use util::panics::PanicHandler;
|
use util::panics::PanicHandler;
|
||||||
use die::*;
|
|
||||||
use rpc_apis;
|
use rpc_apis;
|
||||||
|
use helpers::replace_home;
|
||||||
|
|
||||||
#[cfg(feature = "dapps")]
|
#[cfg(feature = "dapps")]
|
||||||
pub use ethcore_dapps::Server as WebappServer;
|
pub use ethcore_dapps::Server as WebappServer;
|
||||||
#[cfg(not(feature = "dapps"))]
|
#[cfg(not(feature = "dapps"))]
|
||||||
pub struct WebappServer;
|
pub struct WebappServer;
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
pub struct Configuration {
|
pub struct Configuration {
|
||||||
pub enabled: bool,
|
pub enabled: bool,
|
||||||
pub interface: String,
|
pub interface: String,
|
||||||
@ -35,18 +35,31 @@ pub struct Configuration {
|
|||||||
pub dapps_path: String,
|
pub dapps_path: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Default for Configuration {
|
||||||
|
fn default() -> Self {
|
||||||
|
Configuration {
|
||||||
|
enabled: true,
|
||||||
|
interface: "127.0.0.1".into(),
|
||||||
|
port: 8080,
|
||||||
|
user: None,
|
||||||
|
pass: None,
|
||||||
|
dapps_path: replace_home("$HOME/.parity/dapps"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub struct Dependencies {
|
pub struct Dependencies {
|
||||||
pub panic_handler: Arc<PanicHandler>,
|
pub panic_handler: Arc<PanicHandler>,
|
||||||
pub apis: Arc<rpc_apis::Dependencies>,
|
pub apis: Arc<rpc_apis::Dependencies>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(configuration: Configuration, deps: Dependencies) -> Option<WebappServer> {
|
pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<WebappServer>, String> {
|
||||||
if !configuration.enabled {
|
if !configuration.enabled {
|
||||||
return None;
|
return Ok(None);
|
||||||
}
|
}
|
||||||
|
|
||||||
let url = format!("{}:{}", configuration.interface, configuration.port);
|
let url = format!("{}:{}", configuration.interface, configuration.port);
|
||||||
let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid Webapps listen host/port given.", url));
|
let addr = try!(url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url)));
|
||||||
|
|
||||||
let auth = configuration.user.as_ref().map(|username| {
|
let auth = configuration.user.as_ref().map(|username| {
|
||||||
let password = configuration.pass.as_ref().map_or_else(|| {
|
let password = configuration.pass.as_ref().map_or_else(|| {
|
||||||
@ -59,7 +72,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Option<WebappSer
|
|||||||
(username.to_owned(), password)
|
(username.to_owned(), password)
|
||||||
});
|
});
|
||||||
|
|
||||||
Some(setup_dapps_server(deps, configuration.dapps_path, &addr, auth))
|
Ok(Some(try!(setup_dapps_server(deps, configuration.dapps_path, &addr, auth))))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(feature = "dapps"))]
|
#[cfg(not(feature = "dapps"))]
|
||||||
@ -68,8 +81,8 @@ pub fn setup_dapps_server(
|
|||||||
_dapps_path: String,
|
_dapps_path: String,
|
||||||
_url: &SocketAddr,
|
_url: &SocketAddr,
|
||||||
_auth: Option<(String, String)>,
|
_auth: Option<(String, String)>,
|
||||||
) -> ! {
|
) -> Result<WebappServer, String> {
|
||||||
die!("Your Parity version has been compiled without WebApps support.")
|
Err("Your Parity version has been compiled without WebApps support.".into())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "dapps")]
|
#[cfg(feature = "dapps")]
|
||||||
@ -78,7 +91,7 @@ pub fn setup_dapps_server(
|
|||||||
dapps_path: String,
|
dapps_path: String,
|
||||||
url: &SocketAddr,
|
url: &SocketAddr,
|
||||||
auth: Option<(String, String)>
|
auth: Option<(String, String)>
|
||||||
) -> WebappServer {
|
) -> Result<WebappServer, String> {
|
||||||
use ethcore_dapps as dapps;
|
use ethcore_dapps as dapps;
|
||||||
|
|
||||||
let server = dapps::ServerBuilder::new(dapps_path);
|
let server = dapps::ServerBuilder::new(dapps_path);
|
||||||
@ -93,15 +106,14 @@ pub fn setup_dapps_server(
|
|||||||
};
|
};
|
||||||
|
|
||||||
match start_result {
|
match start_result {
|
||||||
Err(dapps::ServerError::IoError(err)) => die_with_io_error("WebApps", err),
|
Err(dapps::ServerError::IoError(err)) => Err(format!("WebApps io error: {}", err)),
|
||||||
Err(e) => die!("WebApps: {:?}", e),
|
Err(e) => Err(format!("WebApps error: {:?}", e)),
|
||||||
Ok(server) => {
|
Ok(server) => {
|
||||||
server.set_panic_handler(move || {
|
server.set_panic_handler(move || {
|
||||||
deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned());
|
deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned());
|
||||||
});
|
});
|
||||||
server
|
Ok(server)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
148
parity/deprecated.rs
Normal file
148
parity/deprecated.rs
Normal file
@ -0,0 +1,148 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::fmt;
|
||||||
|
use cli::Args;
|
||||||
|
|
||||||
|
/// A deprecated CLI option, either a no-op or one with a replacement.
/// Rendered via `Display` as a user-facing warning by `find_deprecated`.
#[derive(Debug, PartialEq)]
pub enum Deprecated {
	/// Flag is accepted but has no effect (behavior is on by default).
	DoesNothing(&'static str),
	/// Flag was renamed: (old name, replacement to suggest).
	Replaced(&'static str, &'static str),
}

impl fmt::Display for Deprecated {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		match *self {
			Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s),
			Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new),
		}
	}
}

impl Deprecated {
	fn jsonrpc() -> Self {
		Deprecated::DoesNothing("--jsonrpc")
	}

	fn rpc() -> Self {
		Deprecated::DoesNothing("--rpc")
	}

	fn jsonrpc_off() -> Self {
		Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")
	}

	fn webapp() -> Self {
		Deprecated::DoesNothing("--webapp")
	}

	fn dapps_off() -> Self {
		// fixed: previously suggested "--no-daps", which is not a real flag
		Deprecated::Replaced("--dapps-off", "--no-dapps")
	}

	fn ipcdisable() -> Self {
		Deprecated::Replaced("--ipcdisable", "--no-ipc")
	}

	fn ipc_off() -> Self {
		Deprecated::Replaced("--ipc-off", "--no-ipc")
	}

	fn etherbase() -> Self {
		Deprecated::Replaced("--etherbase", "--author")
	}

	fn extradata() -> Self {
		Deprecated::Replaced("--extradata", "--extra-data")
	}
}
|
||||||
|
|
||||||
|
pub fn find_deprecated(args: &Args) -> Vec<Deprecated> {
|
||||||
|
let mut result = vec![];
|
||||||
|
|
||||||
|
if args.flag_jsonrpc {
|
||||||
|
result.push(Deprecated::jsonrpc());
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_rpc {
|
||||||
|
result.push(Deprecated::rpc());
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_jsonrpc_off {
|
||||||
|
result.push(Deprecated::jsonrpc_off());
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_webapp {
|
||||||
|
result.push(Deprecated::webapp())
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_dapps_off {
|
||||||
|
result.push(Deprecated::dapps_off());
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_ipcdisable {
|
||||||
|
result.push(Deprecated::ipcdisable());
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_ipc_off {
|
||||||
|
result.push(Deprecated::ipc_off());
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_etherbase.is_some() {
|
||||||
|
result.push(Deprecated::etherbase());
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.flag_extradata.is_some() {
|
||||||
|
result.push(Deprecated::extradata());
|
||||||
|
}
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
	use cli::Args;
	use super::{Deprecated, find_deprecated};

	// Default args carry no deprecated flags; setting every deprecated flag
	// must yield all warnings in declaration order.
	#[test]
	fn test_find_deprecated() {
		assert_eq!(find_deprecated(&Args::default()), vec![]);
		assert_eq!(find_deprecated(&{
			let mut args = Args::default();
			args.flag_jsonrpc = true;
			args.flag_rpc = true;
			args.flag_jsonrpc_off = true;
			args.flag_webapp = true;
			args.flag_dapps_off = true;
			args.flag_ipcdisable = true;
			args.flag_ipc_off = true;
			args.flag_etherbase = Some(Default::default());
			args.flag_extradata = Some(Default::default());
			args
		}), vec![
			Deprecated::jsonrpc(),
			Deprecated::rpc(),
			Deprecated::jsonrpc_off(),
			Deprecated::webapp(),
			Deprecated::dapps_off(),
			Deprecated::ipcdisable(),
			Deprecated::ipc_off(),
			Deprecated::etherbase(),
			Deprecated::extradata(),
		]);
	}
}
|
||||||
|
|
@ -1,61 +0,0 @@
|
|||||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
|
||||||
// This file is part of Parity.
|
|
||||||
|
|
||||||
// Parity is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
|
|
||||||
// Parity is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
use std;
|
|
||||||
use ethcore;
|
|
||||||
use ethcore::client::Error as ClientError;
|
|
||||||
use util::UtilError;
|
|
||||||
use std::process::exit;
|
|
||||||
|
|
||||||
/// Formats its arguments like `format!` and terminates the process via
/// `die::die_with_message` (prints "ERROR: …" and exits with status 1).
#[macro_export]
macro_rules! die {
	($($arg:tt)*) => (::die::die_with_message(&format!("{}", format_args!($($arg)*))));
}
|
|
||||||
|
|
||||||
/// Terminates the process with a message derived from an ethcore error.
/// `module` tags the trace output and the printed message. Never returns.
pub fn die_with_error(module: &'static str, e: ethcore::error::Error) -> ! {
	use ethcore::error::Error;

	match e {
		// I/O-backed util errors get the specialized I/O diagnostics below.
		Error::Util(UtilError::StdIo(e)) => die_with_io_error(module, e),
		// Trace-db errors carry a user-readable Display message of their own.
		Error::Client(ClientError::Trace(e)) => die_with_message(&format!("{}", e)),
		_ => {
			trace!(target: module, "{:?}", e);
			die!("{}: {}", module, e);
		}
	}
}
|
|
||||||
|
|
||||||
/// Terminates the process with a user-friendly message for an I/O error,
/// translating common bind failures into actionable advice. Never returns.
pub fn die_with_io_error(module: &'static str, e: std::io::Error) -> ! {
	trace!(target: module, "{:?}", e);

	match e.kind() {
		std::io::ErrorKind::PermissionDenied => {
			die!("{}: No permissions to bind to specified port.", module)
		},
		std::io::ErrorKind::AddrInUse => {
			die!("{}: Specified address is already in use. Please make sure that nothing is listening on the same port or try using a different one.", module)
		},
		std::io::ErrorKind::AddrNotAvailable => {
			die!("{}: Could not use specified interface or given address is invalid.", module)
		},
		// Anything else: fall back to the error's own Display message.
		_ => die!("{}: {}", module, e),
	}
}
|
|
||||||
|
|
||||||
/// Prints `msg` to stdout prefixed with "ERROR:" and exits with status 1.
pub fn die_with_message(msg: &str) -> ! {
	println!("ERROR: {}", msg);
	exit(1);
}
|
|
79
parity/dir.rs
Normal file
79
parity/dir.rs
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::fs;
|
||||||
|
use std::path::{PathBuf, Path};
|
||||||
|
use util::{H64, H256};
|
||||||
|
use util::journaldb::Algorithm;
|
||||||
|
use helpers::replace_home;
|
||||||
|
|
||||||
|
// this const is irrelevent cause we do have migrations now,
|
||||||
|
// but we still use it for backwards compatibility
|
||||||
|
const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
|
||||||
|
|
||||||
|
/// Filesystem locations used by Parity. All fields are stored as plain
/// strings (already expanded, see `Default` impl which uses `replace_home`).
#[derive(Debug, PartialEq)]
pub struct Directories {
	// Root data/database directory (defaults to $HOME/.parity).
	pub db: String,
	// Key store directory.
	pub keys: String,
	// Trusted signer data directory.
	pub signer: String,
	// Dapps content directory.
	pub dapps: String,
}
|
||||||
|
|
||||||
|
impl Default for Directories {
|
||||||
|
fn default() -> Self {
|
||||||
|
Directories {
|
||||||
|
db: replace_home("$HOME/.parity"),
|
||||||
|
keys: replace_home("$HOME/.parity/keys"),
|
||||||
|
signer: replace_home("$HOME/.parity/signer"),
|
||||||
|
dapps: replace_home("$HOME/.parity/dapps"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Directories {
|
||||||
|
pub fn create_dirs(&self) -> Result<(), String> {
|
||||||
|
try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string()));
|
||||||
|
try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string()));
|
||||||
|
try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string()));
|
||||||
|
try!(fs::create_dir_all(&self.dapps).map_err(|e| e.to_string()));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the path for the databases given the root path and information on the databases.
|
||||||
|
pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
|
||||||
|
let mut dir = Path::new(&self.db).to_path_buf();
|
||||||
|
dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
|
||||||
|
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning));
|
||||||
|
dir
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
	use super::Directories;
	use helpers::replace_home;

	// Sanity check: Default must point every directory under $HOME/.parity.
	#[test]
	fn test_default_directories() {
		let expected = Directories {
			db: replace_home("$HOME/.parity"),
			keys: replace_home("$HOME/.parity/keys"),
			signer: replace_home("$HOME/.parity/signer"),
			dapps: replace_home("$HOME/.parity/dapps"),
		};
		assert_eq!(expected, Directories::default());
	}
}
|
391
parity/helpers.rs
Normal file
391
parity/helpers.rs
Normal file
@ -0,0 +1,391 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::{io, env};
|
||||||
|
use std::io::{Write, Read, BufReader, BufRead};
|
||||||
|
use std::time::Duration;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::fs::File;
|
||||||
|
use util::{clean_0x, U256, Uint, Address, path, is_valid_node_url, H256};
|
||||||
|
use util::journaldb::Algorithm;
|
||||||
|
use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig};
|
||||||
|
use ethcore::miner::PendingSet;
|
||||||
|
use cache::CacheConfig;
|
||||||
|
use dir::Directories;
|
||||||
|
use params::Pruning;
|
||||||
|
use upgrade::upgrade;
|
||||||
|
use migration::migrate;
|
||||||
|
|
||||||
|
pub fn to_duration(s: &str) -> Result<Duration, String> {
|
||||||
|
to_seconds(s).map(Duration::from_secs)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts a duration keyword ("daily"), a suffixed count ("15minutes"),
/// or a bare number of seconds into seconds.
fn to_seconds(s: &str) -> Result<u64, String> {
	const MINUTE: u64 = 60;
	const HOUR: u64 = 60 * MINUTE;
	const DAY: u64 = 24 * HOUR;

	let bad = |_| {
		format!("{}: Invalid duration given. See parity --help for more information.", s)
	};

	match s {
		"twice-daily" => Ok(DAY / 2),
		"half-hourly" => Ok(HOUR / 2),
		"1second" | "1 second" | "second" => Ok(1),
		"1minute" | "1 minute" | "minute" => Ok(MINUTE),
		"hourly" | "1hour" | "1 hour" | "hour" => Ok(HOUR),
		"daily" | "1day" | "1 day" | "day" => Ok(DAY),
		// Suffixed counts: strip the unit suffix, parse the rest, scale.
		x if x.ends_with("seconds") => x[..x.len() - 7].parse().map_err(bad),
		x if x.ends_with("minutes") => x[..x.len() - 7].parse::<u64>().map_err(bad).map(|n| n * MINUTE),
		x if x.ends_with("hours") => x[..x.len() - 5].parse::<u64>().map_err(bad).map(|n| n * HOUR),
		x if x.ends_with("days") => x[..x.len() - 4].parse::<u64>().map_err(bad).map(|n| n * DAY),
		// Bare number: interpreted as seconds.
		x => x.parse().map_err(bad),
	}
}
|
||||||
|
|
||||||
|
pub fn to_mode(s: &str, timeout: u64, alarm: u64) -> Result<Mode, String> {
|
||||||
|
match s {
|
||||||
|
"active" => Ok(Mode::Active),
|
||||||
|
"passive" => Ok(Mode::Passive(Duration::from_secs(timeout), Duration::from_secs(alarm))),
|
||||||
|
"dark" => Ok(Mode::Dark(Duration::from_secs(timeout))),
|
||||||
|
_ => Err(format!("{}: Invalid address for --mode. Must be one of active, passive or dark.", s)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_block_id(s: &str) -> Result<BlockID, String> {
|
||||||
|
if s == "latest" {
|
||||||
|
Ok(BlockID::Latest)
|
||||||
|
} else if let Ok(num) = s.parse() {
|
||||||
|
Ok(BlockID::Number(num))
|
||||||
|
} else if let Ok(hash) = s.parse() {
|
||||||
|
Ok(BlockID::Hash(hash))
|
||||||
|
} else {
|
||||||
|
Err("Invalid block.".into())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_u256(s: &str) -> Result<U256, String> {
|
||||||
|
if let Ok(decimal) = U256::from_dec_str(s) {
|
||||||
|
Ok(decimal)
|
||||||
|
} else if let Ok(hex) = clean_0x(s).parse() {
|
||||||
|
Ok(hex)
|
||||||
|
} else {
|
||||||
|
Err(format!("Invalid numeric value: {}", s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_pending_set(s: &str) -> Result<PendingSet, String> {
|
||||||
|
match s {
|
||||||
|
"cheap" => Ok(PendingSet::AlwaysQueue),
|
||||||
|
"strict" => Ok(PendingSet::AlwaysSealing),
|
||||||
|
"lenient" => Ok(PendingSet::SealingOrElseQueue),
|
||||||
|
other => Err(format!("Invalid pending set value: {:?}", other)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_address(s: Option<String>) -> Result<Address, String> {
|
||||||
|
match s {
|
||||||
|
Some(ref a) => clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)),
|
||||||
|
None => Ok(Address::default())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_addresses(s: &Option<String>) -> Result<Vec<Address>, String> {
|
||||||
|
match *s {
|
||||||
|
Some(ref adds) if adds.is_empty() => adds.split(',')
|
||||||
|
.map(|a| clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)))
|
||||||
|
.collect(),
|
||||||
|
_ => Ok(Vec::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tries to parse string as a price (a decimal `f32`).
pub fn to_price(s: &str) -> Result<f32, String> {
	// Fixed: the message previously contained the literal `'s'` (the value
	// was never interpolated) and misspelled "transaction".
	s.parse::<f32>().map_err(|_| format!("Invalid transaction price {:?} given. Must be a decimal number.", s))
}
|
||||||
|
|
||||||
|
/// Replaces `$HOME` str with home directory path.
pub fn replace_home(arg: &str) -> String {
	// the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support`
	let home = env::home_dir().unwrap();
	let expanded = arg.replace("$HOME", home.to_str().unwrap());
	// Normalize to the platform's native separator (no-op on unix).
	let separator = ::std::path::MAIN_SEPARATOR.to_string();
	expanded.replace("/", &separator)
}
|
||||||
|
|
||||||
|
/// Flush output buffer.
pub fn flush_stdout() {
	let mut out = io::stdout();
	out.flush().expect("stdout is flushable; qed");
}
|
||||||
|
|
||||||
|
/// Returns default geth ipc path.
|
||||||
|
pub fn geth_ipc_path(testnet: bool) -> String {
|
||||||
|
// Windows path should not be hardcoded here.
|
||||||
|
// Instead it should be a part of path::ethereum
|
||||||
|
if cfg!(windows) {
|
||||||
|
return r"\\.\pipe\geth.ipc".to_owned();
|
||||||
|
}
|
||||||
|
|
||||||
|
if testnet {
|
||||||
|
path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned()
|
||||||
|
} else {
|
||||||
|
path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Formats and returns parity ipc path.
|
||||||
|
pub fn parity_ipc_path(s: &str) -> String {
|
||||||
|
// Windows path should not be hardcoded here.
|
||||||
|
if cfg!(windows) {
|
||||||
|
return r"\\.\pipe\parity.jsonrpc".to_owned();
|
||||||
|
}
|
||||||
|
|
||||||
|
replace_home(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Validates and formats bootnodes option.
|
||||||
|
pub fn to_bootnodes(bootnodes: &Option<String>) -> Result<Vec<String>, String> {
|
||||||
|
match *bootnodes {
|
||||||
|
Some(ref x) if !x.is_empty() => x.split(',').map(|s| {
|
||||||
|
if is_valid_node_url(s) {
|
||||||
|
Ok(s.to_owned())
|
||||||
|
} else {
|
||||||
|
Err(format!("Invalid node address format given for a boot node: {}", s))
|
||||||
|
}
|
||||||
|
}).collect(),
|
||||||
|
Some(_) => Ok(vec![]),
|
||||||
|
None => Ok(vec![])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test-only helper: a NetworkConfiguration with the defaults used by the
// CLI tests (all-interfaces listener on 30303, NAT and discovery enabled,
// no boot/reserved nodes).
#[cfg(test)]
pub fn default_network_config() -> ::util::NetworkConfiguration {
	use util::{NetworkConfiguration, NonReservedPeerMode};
	NetworkConfiguration {
		config_path: Some(replace_home("$HOME/.parity/network")),
		listen_address: Some("0.0.0.0:30303".parse().unwrap()),
		public_address: None,
		udp_port: None,
		nat_enabled: true,
		discovery_enabled: true,
		boot_nodes: Vec::new(),
		use_secret: None,
		ideal_peers: 25,
		reserved_nodes: Vec::new(),
		non_reserved_mode: NonReservedPeerMode::Accept,
	}
}
|
||||||
|
|
||||||
|
pub fn to_client_config(
|
||||||
|
cache_config: &CacheConfig,
|
||||||
|
dirs: &Directories,
|
||||||
|
genesis_hash: H256,
|
||||||
|
mode: Mode,
|
||||||
|
tracing: Switch,
|
||||||
|
pruning: Pruning,
|
||||||
|
compaction: DatabaseCompactionProfile,
|
||||||
|
vm_type: VMType,
|
||||||
|
name: String,
|
||||||
|
fork_name: Option<&String>,
|
||||||
|
) -> ClientConfig {
|
||||||
|
let mut client_config = ClientConfig::default();
|
||||||
|
|
||||||
|
let mb = 1024 * 1024;
|
||||||
|
// in bytes
|
||||||
|
client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb;
|
||||||
|
// in bytes
|
||||||
|
client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb;
|
||||||
|
// db blockchain cache size, in megabytes
|
||||||
|
client_config.blockchain.db_cache_size = Some(cache_config.db_blockchain_cache_size() as usize);
|
||||||
|
// db state cache size, in megabytes
|
||||||
|
client_config.db_cache_size = Some(cache_config.db_state_cache_size() as usize);
|
||||||
|
// db queue cache size, in bytes
|
||||||
|
client_config.queue.max_mem_use = cache_config.queue() as usize * mb;
|
||||||
|
|
||||||
|
client_config.mode = mode;
|
||||||
|
client_config.tracing.enabled = tracing;
|
||||||
|
client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name);
|
||||||
|
client_config.db_compaction = compaction;
|
||||||
|
client_config.vm_type = vm_type;
|
||||||
|
client_config.name = name;
|
||||||
|
client_config
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn execute_upgrades(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> Result<(), String> {
|
||||||
|
match upgrade(Some(&dirs.db)) {
|
||||||
|
Ok(upgrades_applied) if upgrades_applied > 0 => {
|
||||||
|
debug!("Executed {} upgrade scripts - ok", upgrades_applied);
|
||||||
|
},
|
||||||
|
Err(e) => {
|
||||||
|
return Err(format!("Error upgrading parity data: {:?}", e));
|
||||||
|
},
|
||||||
|
_ => {},
|
||||||
|
}
|
||||||
|
|
||||||
|
let client_path = dirs.client_path(genesis_hash, fork_name, pruning);
|
||||||
|
migrate(&client_path, pruning).map_err(|e| format!("{}", e))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prompts user asking for password.
|
||||||
|
pub fn password_prompt() -> Result<String, String> {
|
||||||
|
use rpassword::read_password;
|
||||||
|
|
||||||
|
println!("Please note that password is NOT RECOVERABLE.");
|
||||||
|
print!("Type password: ");
|
||||||
|
flush_stdout();
|
||||||
|
|
||||||
|
let password = read_password().unwrap();
|
||||||
|
|
||||||
|
print!("Repeat password: ");
|
||||||
|
flush_stdout();
|
||||||
|
|
||||||
|
let password_repeat = read_password().unwrap();
|
||||||
|
|
||||||
|
if password != password_repeat {
|
||||||
|
return Err("Passwords do not match!".into());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(password)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read a password from password file.
|
||||||
|
pub fn password_from_file<P>(path: P) -> Result<String, String> where P: AsRef<Path> {
|
||||||
|
let mut file = try!(File::open(path).map_err(|_| "Unable to open password file."));
|
||||||
|
let mut file_content = String::new();
|
||||||
|
try!(file.read_to_string(&mut file_content).map_err(|_| "Unable to read password file."));
|
||||||
|
// remove eof
|
||||||
|
Ok((&file_content[..file_content.len() - 1]).to_owned())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reads passwords from files. Treats each line as a separate password.
|
||||||
|
pub fn passwords_from_files(files: Vec<String>) -> Result<Vec<String>, String> {
|
||||||
|
let passwords = files.iter().map(|filename| {
|
||||||
|
let file = try!(File::open(filename).map_err(|_| format!("{} Unable to read password file. Ensure it exists and permissions are correct.", filename)));
|
||||||
|
let reader = BufReader::new(&file);
|
||||||
|
let lines = reader.lines()
|
||||||
|
.map(|l| l.unwrap())
|
||||||
|
.collect::<Vec<String>>();
|
||||||
|
Ok(lines)
|
||||||
|
}).collect::<Result<Vec<Vec<String>>, String>>();
|
||||||
|
Ok(try!(passwords).into_iter().flat_map(|x| x).collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
	use std::time::Duration;
	use util::{U256};
	use ethcore::client::{Mode, BlockID};
	use ethcore::miner::PendingSet;
	use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_price, geth_ipc_path, to_bootnodes};

	// Covers keywords, suffixed counts, and singular/plural unit forms.
	#[test]
	fn test_to_duration() {
		assert_eq!(to_duration("twice-daily").unwrap(), Duration::from_secs(12 * 60 * 60));
		assert_eq!(to_duration("half-hourly").unwrap(), Duration::from_secs(30 * 60));
		assert_eq!(to_duration("1second").unwrap(), Duration::from_secs(1));
		assert_eq!(to_duration("2seconds").unwrap(), Duration::from_secs(2));
		assert_eq!(to_duration("15seconds").unwrap(), Duration::from_secs(15));
		assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(1 * 60));
		assert_eq!(to_duration("2minutes").unwrap(), Duration::from_secs(2 * 60));
		assert_eq!(to_duration("15minutes").unwrap(), Duration::from_secs(15 * 60));
		assert_eq!(to_duration("hourly").unwrap(), Duration::from_secs(60 * 60));
		assert_eq!(to_duration("daily").unwrap(), Duration::from_secs(24 * 60 * 60));
		assert_eq!(to_duration("1hour").unwrap(), Duration::from_secs(1 * 60 * 60));
		assert_eq!(to_duration("2hours").unwrap(), Duration::from_secs(2 * 60 * 60));
		assert_eq!(to_duration("15hours").unwrap(), Duration::from_secs(15 * 60 * 60));
		assert_eq!(to_duration("1day").unwrap(), Duration::from_secs(1 * 24 * 60 * 60));
		assert_eq!(to_duration("2days").unwrap(), Duration::from_secs(2 * 24 *60 * 60));
		assert_eq!(to_duration("15days").unwrap(), Duration::from_secs(15 * 24 * 60 * 60));
	}

	#[test]
	fn test_to_mode() {
		assert_eq!(to_mode("active", 0, 0).unwrap(), Mode::Active);
		assert_eq!(to_mode("passive", 10, 20).unwrap(), Mode::Passive(Duration::from_secs(10), Duration::from_secs(20)));
		assert_eq!(to_mode("dark", 20, 30).unwrap(), Mode::Dark(Duration::from_secs(20)));
		assert!(to_mode("other", 20, 30).is_err());
	}

	#[test]
	fn test_to_block_id() {
		assert_eq!(to_block_id("latest").unwrap(), BlockID::Latest);
		assert_eq!(to_block_id("0").unwrap(), BlockID::Number(0));
		assert_eq!(to_block_id("2").unwrap(), BlockID::Number(2));
		assert_eq!(to_block_id("15").unwrap(), BlockID::Number(15));
		// A 64-hex-character string parses as a hash, not a number.
		assert_eq!(
			to_block_id("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e").unwrap(),
			BlockID::Hash("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e".parse().unwrap())
		);
	}

	#[test]
	fn test_to_u256() {
		assert_eq!(to_u256("0").unwrap(), U256::from(0));
		assert_eq!(to_u256("11").unwrap(), U256::from(11));
		// Decimal parse is tried first; 0x-prefixed input falls back to hex.
		assert_eq!(to_u256("0x11").unwrap(), U256::from(17));
		assert!(to_u256("u").is_err())
	}

	#[test]
	fn test_pending_set() {
		assert_eq!(to_pending_set("cheap").unwrap(), PendingSet::AlwaysQueue);
		assert_eq!(to_pending_set("strict").unwrap(), PendingSet::AlwaysSealing);
		assert_eq!(to_pending_set("lenient").unwrap(), PendingSet::SealingOrElseQueue);
		assert!(to_pending_set("othe").is_err());
	}

	#[test]
	fn test_to_address() {
		// Both 0x-prefixed and bare hex forms are accepted.
		assert_eq!(
			to_address(Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(),
			"D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap()
		);
		assert_eq!(
			to_address(Some("D9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(),
			"D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap()
		);
		assert_eq!(to_address(None).unwrap(), Default::default());
	}

	#[test]
	fn test_to_price() {
		assert_eq!(to_price("1").unwrap(), 1.0);
		assert_eq!(to_price("2.3").unwrap(), 2.3);
		assert_eq!(to_price("2.33").unwrap(), 2.33);
	}

	#[test]
	#[cfg(windows)]
	fn test_geth_ipc_path() {
		assert_eq!(geth_ipc_path(true), r"\\.\pipe\geth.ipc".to_owned());
		assert_eq!(geth_ipc_path(false), r"\\.\pipe\geth.ipc".to_owned());
	}

	#[test]
	#[cfg(not(windows))]
	fn test_geth_ipc_path() {
		use util::path;
		assert_eq!(geth_ipc_path(true), path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned());
		assert_eq!(geth_ipc_path(false), path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned());
	}

	#[test]
	fn test_to_bootnodes() {
		let one_bootnode = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303";
		let two_bootnodes = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303,enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303";

		assert_eq!(to_bootnodes(&Some("".into())), Ok(vec![]));
		assert_eq!(to_bootnodes(&None), Ok(vec![]));
		assert_eq!(to_bootnodes(&Some(one_bootnode.into())), Ok(vec![one_bootnode.into()]));
		assert_eq!(to_bootnodes(&Some(two_bootnodes.into())), Ok(vec![one_bootnode.into(), one_bootnode.into()]));
	}
}
|
||||||
|
|
567
parity/main.rs
567
parity/main.rs
@ -31,10 +31,9 @@ extern crate ethsync;
|
|||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate log as rlog;
|
extern crate log as rlog;
|
||||||
extern crate env_logger;
|
extern crate env_logger;
|
||||||
|
extern crate ethcore_logger;
|
||||||
extern crate ctrlc;
|
extern crate ctrlc;
|
||||||
extern crate fdlimit;
|
extern crate fdlimit;
|
||||||
#[cfg(not(windows))]
|
|
||||||
extern crate daemonize;
|
|
||||||
extern crate time;
|
extern crate time;
|
||||||
extern crate number_prefix;
|
extern crate number_prefix;
|
||||||
extern crate rpassword;
|
extern crate rpassword;
|
||||||
@ -53,15 +52,12 @@ extern crate ansi_term;
|
|||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate lazy_static;
|
extern crate lazy_static;
|
||||||
extern crate regex;
|
extern crate regex;
|
||||||
extern crate ethcore_logger;
|
|
||||||
extern crate isatty;
|
extern crate isatty;
|
||||||
|
|
||||||
#[cfg(feature = "dapps")]
|
#[cfg(feature = "dapps")]
|
||||||
extern crate ethcore_dapps;
|
extern crate ethcore_dapps;
|
||||||
|
|
||||||
|
mod cache;
|
||||||
#[macro_use]
|
|
||||||
mod die;
|
|
||||||
mod upgrade;
|
mod upgrade;
|
||||||
mod rpc;
|
mod rpc;
|
||||||
mod dapps;
|
mod dapps;
|
||||||
@ -73,529 +69,56 @@ mod migration;
|
|||||||
mod signer;
|
mod signer;
|
||||||
mod rpc_apis;
|
mod rpc_apis;
|
||||||
mod url;
|
mod url;
|
||||||
|
mod helpers;
|
||||||
|
mod params;
|
||||||
|
mod deprecated;
|
||||||
|
mod dir;
|
||||||
mod modules;
|
mod modules;
|
||||||
|
mod account;
|
||||||
|
mod blockchain;
|
||||||
|
mod presale;
|
||||||
|
mod run;
|
||||||
|
|
||||||
use std::io::{Write, Read, BufReader, BufRead};
|
use std::{process, env};
|
||||||
use std::ops::Deref;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::path::Path;
|
|
||||||
use std::fs::File;
|
|
||||||
use std::str::{FromStr, from_utf8};
|
|
||||||
use std::thread::sleep;
|
|
||||||
use std::time::Duration;
|
|
||||||
use rustc_serialize::hex::FromHex;
|
|
||||||
use ctrlc::CtrlC;
|
|
||||||
use util::{H256, ToPretty, PayloadInfo, Bytes, Colour, version, journaldb, RotatingLogger};
|
|
||||||
use util::panics::{MayPanic, ForwardPanic, PanicHandler};
|
|
||||||
use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError, Mode};
|
|
||||||
use ethcore::error::{ImportError};
|
|
||||||
use ethcore::service::ClientService;
|
|
||||||
use ethcore::spec::Spec;
|
|
||||||
use ethsync::{NetworkConfiguration};
|
|
||||||
use ethcore::miner::{Miner, MinerService, ExternalMiner};
|
|
||||||
use migration::migrate;
|
|
||||||
use informant::Informant;
|
|
||||||
use util::{Mutex, Condvar};
|
|
||||||
use ethcore_logger::setup_log;
|
|
||||||
#[cfg(feature="ipc")]
|
|
||||||
use ethcore::client::ChainNotify;
|
|
||||||
|
|
||||||
use die::*;
|
|
||||||
use cli::print_version;
|
use cli::print_version;
|
||||||
use rpc::RpcServer;
|
use configuration::{Cmd, Configuration};
|
||||||
use signer::{SignerServer, new_token};
|
use deprecated::find_deprecated;
|
||||||
use dapps::WebappServer;
|
|
||||||
use io_handler::ClientIoHandler;
|
fn execute(command: Cmd) -> Result<String, String> {
|
||||||
use configuration::{Configuration};
|
match command {
|
||||||
|
Cmd::Run(run_cmd) => {
|
||||||
|
try!(run::execute(run_cmd));
|
||||||
|
Ok("".into())
|
||||||
|
},
|
||||||
|
Cmd::Version => Ok(print_version()),
|
||||||
|
Cmd::Account(account_cmd) => account::execute(account_cmd),
|
||||||
|
Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd),
|
||||||
|
Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd),
|
||||||
|
Cmd::SignerToken(path) => signer::new_token(path),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start() -> Result<String, String> {
|
||||||
|
let conf = Configuration::parse(env::args()).unwrap_or_else(|e| e.exit());
|
||||||
|
|
||||||
|
let deprecated = find_deprecated(&conf.args);
|
||||||
|
for d in deprecated {
|
||||||
|
println!("{}", d);
|
||||||
|
}
|
||||||
|
|
||||||
|
let cmd = try!(conf.into_command());
|
||||||
|
execute(cmd)
|
||||||
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
let conf = Configuration::parse();
|
match start() {
|
||||||
execute(conf);
|
Ok(result) => {
|
||||||
}
|
print!("{}", result);
|
||||||
|
|
||||||
fn execute(conf: Configuration) {
|
|
||||||
if conf.args.flag_version {
|
|
||||||
print_version();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.args.cmd_signer {
|
|
||||||
execute_signer(conf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
let spec = conf.spec();
|
|
||||||
let client_config = conf.client_config(&spec);
|
|
||||||
|
|
||||||
execute_upgrades(&conf, &spec, &client_config);
|
|
||||||
|
|
||||||
if conf.args.cmd_daemon {
|
|
||||||
daemonize(&conf);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Setup panic handler
|
|
||||||
let panic_handler = PanicHandler::new_in_arc();
|
|
||||||
// Setup logging
|
|
||||||
let logger = setup_log(&conf.log_settings());
|
|
||||||
// Raise fdlimit
|
|
||||||
unsafe { ::fdlimit::raise_fd_limit(); }
|
|
||||||
|
|
||||||
if conf.args.cmd_account {
|
|
||||||
execute_account_cli(conf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.args.cmd_wallet {
|
|
||||||
execute_wallet_cli(conf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.args.cmd_export {
|
|
||||||
execute_export(conf, panic_handler);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.args.cmd_import {
|
|
||||||
execute_import(conf, panic_handler);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
execute_client(conf, spec, client_config, panic_handler, logger);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(windows))]
|
|
||||||
fn daemonize(conf: &Configuration) {
|
|
||||||
use daemonize::Daemonize;
|
|
||||||
Daemonize::new()
|
|
||||||
.pid_file(conf.args.arg_pid_file.clone())
|
|
||||||
.chown_pid_file(true)
|
|
||||||
.start()
|
|
||||||
.unwrap_or_else(|e| die!("Couldn't daemonize; {}", e));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(windows)]
|
|
||||||
fn daemonize(_conf: &Configuration) {
|
|
||||||
}
|
|
||||||
|
|
||||||
fn execute_upgrades(conf: &Configuration, spec: &Spec, client_config: &ClientConfig) {
|
|
||||||
match ::upgrade::upgrade(Some(&conf.path())) {
|
|
||||||
Ok(upgrades_applied) if upgrades_applied > 0 => {
|
|
||||||
debug!("Executed {} upgrade scripts - ok", upgrades_applied);
|
|
||||||
},
|
},
|
||||||
Err(e) => {
|
Err(err) => {
|
||||||
die!("Error upgrading parity data: {:?}", e);
|
print!("{}", err);
|
||||||
},
|
process::exit(1);
|
||||||
_ => {},
|
|
||||||
}
|
|
||||||
|
|
||||||
let db_path = get_db_path(Path::new(&conf.path()), client_config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref());
|
|
||||||
let result = migrate(&db_path, client_config.pruning);
|
|
||||||
if let Err(err) = result {
|
|
||||||
die_with_message(&format!("{} DB path: {}", err, db_path.to_string_lossy()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig, panic_handler: Arc<PanicHandler>, logger: Arc<RotatingLogger>) {
|
|
||||||
let mut hypervisor = modules::hypervisor();
|
|
||||||
|
|
||||||
info!("Starting {}", Colour::White.bold().paint(format!("{}", version())));
|
|
||||||
info!("Using state DB journalling strategy {}", Colour::White.bold().paint(match client_config.pruning {
|
|
||||||
journaldb::Algorithm::Archive => "archive",
|
|
||||||
journaldb::Algorithm::EarlyMerge => "light",
|
|
||||||
journaldb::Algorithm::OverlayRecent => "fast",
|
|
||||||
journaldb::Algorithm::RefCounted => "basic",
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Display warning about using experimental journaldb types
|
|
||||||
match client_config.pruning {
|
|
||||||
journaldb::Algorithm::EarlyMerge | journaldb::Algorithm::RefCounted => {
|
|
||||||
warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable"));
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display warning about using unlock with signer
|
|
||||||
if conf.signer_enabled() && conf.args.flag_unlock.is_some() {
|
|
||||||
warn!("Using Trusted Signer and --unlock is not recommended!");
|
|
||||||
warn!("NOTE that Signer will not ask you to confirm transactions from unlocked account.");
|
|
||||||
}
|
|
||||||
|
|
||||||
let net_settings = conf.net_settings(&spec);
|
|
||||||
let sync_config = conf.sync_config(&spec);
|
|
||||||
|
|
||||||
// Secret Store
|
|
||||||
let account_service = Arc::new(conf.account_service());
|
|
||||||
|
|
||||||
// Miner
|
|
||||||
let miner = Miner::new(conf.miner_options(), conf.gas_pricer(), conf.spec(), Some(account_service.clone()));
|
|
||||||
miner.set_author(conf.author().unwrap_or_default());
|
|
||||||
miner.set_gas_floor_target(conf.gas_floor_target());
|
|
||||||
miner.set_gas_ceil_target(conf.gas_ceil_target());
|
|
||||||
miner.set_extra_data(conf.extra_data());
|
|
||||||
miner.set_transactions_limit(conf.args.flag_tx_queue_size);
|
|
||||||
|
|
||||||
// Build client
|
|
||||||
let service = ClientService::start(
|
|
||||||
client_config,
|
|
||||||
spec,
|
|
||||||
Path::new(&conf.path()),
|
|
||||||
miner.clone(),
|
|
||||||
).unwrap_or_else(|e| die_with_error("Client", e));
|
|
||||||
|
|
||||||
panic_handler.forward_from(&service);
|
|
||||||
let client = service.client();
|
|
||||||
|
|
||||||
let external_miner = Arc::new(ExternalMiner::default());
|
|
||||||
let network_settings = Arc::new(conf.network_settings());
|
|
||||||
|
|
||||||
// Sync
|
|
||||||
let (sync_provider, manage_network, chain_notify) =
|
|
||||||
modules::sync(&mut hypervisor, sync_config, NetworkConfiguration::from(net_settings), client.clone(), &conf.log_settings())
|
|
||||||
.unwrap_or_else(|e| die_with_error("Sync", e));
|
|
||||||
|
|
||||||
service.add_notify(chain_notify.clone());
|
|
||||||
|
|
||||||
// if network is active by default
|
|
||||||
if match conf.mode() { Mode::Dark(..) => false, _ => !conf.args.flag_no_network } {
|
|
||||||
chain_notify.start();
|
|
||||||
}
|
|
||||||
|
|
||||||
let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies {
|
|
||||||
signer_port: conf.signer_port(),
|
|
||||||
signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()),
|
|
||||||
client: client.clone(),
|
|
||||||
sync: sync_provider.clone(),
|
|
||||||
net: manage_network.clone(),
|
|
||||||
secret_store: account_service.clone(),
|
|
||||||
miner: miner.clone(),
|
|
||||||
external_miner: external_miner.clone(),
|
|
||||||
logger: logger.clone(),
|
|
||||||
settings: network_settings.clone(),
|
|
||||||
allow_pending_receipt_query: !conf.args.flag_geth,
|
|
||||||
net_service: manage_network.clone(),
|
|
||||||
});
|
|
||||||
|
|
||||||
let dependencies = rpc::Dependencies {
|
|
||||||
panic_handler: panic_handler.clone(),
|
|
||||||
apis: deps_for_rpc_apis.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Setup http rpc
|
|
||||||
let rpc_server = rpc::new_http(rpc::HttpConfiguration {
|
|
||||||
enabled: network_settings.rpc_enabled,
|
|
||||||
interface: conf.rpc_interface(),
|
|
||||||
port: network_settings.rpc_port,
|
|
||||||
apis: conf.rpc_apis(),
|
|
||||||
cors: conf.rpc_cors(),
|
|
||||||
hosts: conf.rpc_hosts(),
|
|
||||||
}, &dependencies);
|
|
||||||
|
|
||||||
// setup ipc rpc
|
|
||||||
let _ipc_server = rpc::new_ipc(conf.ipc_settings(), &dependencies);
|
|
||||||
debug!("IPC: {}", conf.ipc_settings());
|
|
||||||
|
|
||||||
if conf.args.flag_webapp { println!("WARNING: Flag -w/--webapp is deprecated. Dapps server is now on by default. Ignoring."); }
|
|
||||||
let dapps_server = dapps::new(dapps::Configuration {
|
|
||||||
enabled: conf.dapps_enabled(),
|
|
||||||
interface: conf.dapps_interface(),
|
|
||||||
port: conf.args.flag_dapps_port,
|
|
||||||
user: conf.args.flag_dapps_user.clone(),
|
|
||||||
pass: conf.args.flag_dapps_pass.clone(),
|
|
||||||
dapps_path: conf.directories().dapps,
|
|
||||||
}, dapps::Dependencies {
|
|
||||||
panic_handler: panic_handler.clone(),
|
|
||||||
apis: deps_for_rpc_apis.clone(),
|
|
||||||
});
|
|
||||||
|
|
||||||
// Set up a signer
|
|
||||||
let signer_server = signer::start(signer::Configuration {
|
|
||||||
enabled: conf.signer_enabled(),
|
|
||||||
port: conf.args.flag_signer_port,
|
|
||||||
signer_path: conf.directories().signer,
|
|
||||||
}, signer::Dependencies {
|
|
||||||
panic_handler: panic_handler.clone(),
|
|
||||||
apis: deps_for_rpc_apis.clone(),
|
|
||||||
});
|
|
||||||
|
|
||||||
let informant = Arc::new(Informant::new(service.client(), Some(sync_provider.clone()), Some(manage_network.clone()), conf.have_color()));
|
|
||||||
service.add_notify(informant.clone());
|
|
||||||
// Register IO handler
|
|
||||||
let io_handler = Arc::new(ClientIoHandler {
|
|
||||||
client: service.client(),
|
|
||||||
info: informant,
|
|
||||||
sync: sync_provider.clone(),
|
|
||||||
net: manage_network.clone(),
|
|
||||||
accounts: account_service.clone(),
|
|
||||||
});
|
|
||||||
service.register_io_handler(io_handler).expect("Error registering IO handler");
|
|
||||||
|
|
||||||
if conf.args.cmd_ui {
|
|
||||||
if !conf.dapps_enabled() {
|
|
||||||
die_with_message("Cannot use UI command with Dapps turned off.");
|
|
||||||
}
|
|
||||||
url::open(&format!("http://{}:{}/", conf.dapps_interface(), conf.args.flag_dapps_port));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle exit
|
|
||||||
wait_for_exit(panic_handler, rpc_server, dapps_server, signer_server);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Flushes stdout so that interactive prompts written with `print!`
/// appear before we block waiting for user input.
fn flush_stdout() {
	let mut stdout = ::std::io::stdout();
	stdout.flush().expect("stdout is flushable; qed");
}
|
|
||||||
|
|
||||||
/// Block data encoding used by the blockchain import/export commands.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum DataFormat {
	/// Hexadecimal text, one block per line.
	Hex,
	/// Raw concatenated RLP bytes.
	Binary,
}

impl FromStr for DataFormat {
	type Err = String;

	/// Parses the `--format` CLI value. Centralises the "binary"/"bin"/"hex"
	/// matching that import and export otherwise duplicate by hand.
	fn from_str(s: &str) -> Result<Self, Self::Err> {
		match s {
			"binary" | "bin" => Ok(DataFormat::Binary),
			"hex" => Ok(DataFormat::Hex),
			x => Err(format!("Invalid format: {}", x)),
		}
	}
}
|
|
||||||
|
|
||||||
fn execute_export(conf: Configuration, panic_handler: Arc<PanicHandler>) {
|
|
||||||
let spec = conf.spec();
|
|
||||||
let client_config = conf.client_config(&spec);
|
|
||||||
|
|
||||||
// Build client
|
|
||||||
let service = ClientService::start(
|
|
||||||
client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec()))
|
|
||||||
).unwrap_or_else(|e| die_with_error("Client", e));
|
|
||||||
|
|
||||||
panic_handler.forward_from(&service);
|
|
||||||
let client = service.client();
|
|
||||||
|
|
||||||
// we have a client!
|
|
||||||
let parse_block_id = |s: &str, arg: &str| -> u64 {
|
|
||||||
if s == "latest" {
|
|
||||||
client.chain_info().best_block_number
|
|
||||||
} else if let Ok(n) = s.parse::<u64>() {
|
|
||||||
n
|
|
||||||
} else if let Ok(h) = H256::from_str(s) {
|
|
||||||
client.block_number(BlockID::Hash(h)).unwrap_or_else(|| {
|
|
||||||
die!("Unknown block hash passed to {} parameter: {:?}", arg, s);
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
die!("Invalid {} parameter given: {:?}", arg, s);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let from = parse_block_id(&conf.args.flag_from, "--from");
|
|
||||||
let to = parse_block_id(&conf.args.flag_to, "--to");
|
|
||||||
let format = match conf.args.flag_format {
|
|
||||||
Some(x) => match x.deref() {
|
|
||||||
"binary" | "bin" => DataFormat::Binary,
|
|
||||||
"hex" => DataFormat::Hex,
|
|
||||||
x => die!("Invalid --format parameter given: {:?}", x),
|
|
||||||
},
|
|
||||||
None if conf.args.arg_file.is_none() => DataFormat::Hex,
|
|
||||||
None => DataFormat::Binary,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut out: Box<Write> = if let Some(f) = conf.args.arg_file {
|
|
||||||
Box::new(File::create(&f).unwrap_or_else(|_| die!("Cannot write to file given: {}", f)))
|
|
||||||
} else {
|
|
||||||
Box::new(::std::io::stdout())
|
|
||||||
};
|
|
||||||
|
|
||||||
for i in from..(to + 1) {
|
|
||||||
let b = client.deref().block(BlockID::Number(i)).unwrap();
|
|
||||||
match format {
|
|
||||||
DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
|
|
||||||
DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn execute_import(conf: Configuration, panic_handler: Arc<PanicHandler>) {
|
|
||||||
let spec = conf.spec();
|
|
||||||
let client_config = conf.client_config(&spec);
|
|
||||||
|
|
||||||
// Build client
|
|
||||||
let service = ClientService::start(
|
|
||||||
client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec()))
|
|
||||||
).unwrap_or_else(|e| die_with_error("Client", e));
|
|
||||||
|
|
||||||
panic_handler.forward_from(&service);
|
|
||||||
let client = service.client();
|
|
||||||
|
|
||||||
let mut instream: Box<Read> = if let Some(ref f) = conf.args.arg_file {
|
|
||||||
let f = File::open(f).unwrap_or_else(|_| die!("Cannot open the file given: {}", f));
|
|
||||||
Box::new(f)
|
|
||||||
} else {
|
|
||||||
Box::new(::std::io::stdin())
|
|
||||||
};
|
|
||||||
|
|
||||||
const READAHEAD_BYTES: usize = 8;
|
|
||||||
|
|
||||||
let mut first_bytes: Bytes = vec![0; READAHEAD_BYTES];
|
|
||||||
let mut first_read = 0;
|
|
||||||
|
|
||||||
let format = match conf.args.flag_format {
|
|
||||||
Some(ref x) => match x.deref() {
|
|
||||||
"binary" | "bin" => DataFormat::Binary,
|
|
||||||
"hex" => DataFormat::Hex,
|
|
||||||
x => die!("Invalid --format parameter given: {:?}", x),
|
|
||||||
},
|
|
||||||
None => {
|
|
||||||
// autodetect...
|
|
||||||
first_read = instream.read(&mut(first_bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream."));
|
|
||||||
match first_bytes[0] {
|
|
||||||
0xf9 => {
|
|
||||||
info!("Autodetected binary data format.");
|
|
||||||
DataFormat::Binary
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
info!("Autodetected hex data format.");
|
|
||||||
DataFormat::Hex
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let informant = Informant::new(client.clone(), None, None, conf.have_color());
|
|
||||||
|
|
||||||
let do_import = |bytes| {
|
|
||||||
while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
|
|
||||||
match client.import_block(bytes) {
|
|
||||||
Ok(_) => {}
|
|
||||||
Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!("Skipping block already in chain."); }
|
|
||||||
Err(e) => die!("Cannot import block: {:?}", e)
|
|
||||||
}
|
|
||||||
informant.tick();
|
|
||||||
};
|
|
||||||
|
|
||||||
match format {
|
|
||||||
DataFormat::Binary => {
|
|
||||||
loop {
|
|
||||||
let mut bytes: Bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
|
|
||||||
let n = if first_read > 0 {first_read} else {instream.read(&mut(bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream."))};
|
|
||||||
if n == 0 { break; }
|
|
||||||
first_read = 0;
|
|
||||||
let s = PayloadInfo::from(&(bytes[..])).unwrap_or_else(|e| die!("Invalid RLP in the file/stream: {:?}", e)).total();
|
|
||||||
bytes.resize(s, 0);
|
|
||||||
instream.read_exact(&mut(bytes[READAHEAD_BYTES..])).unwrap_or_else(|_| die!("Error reading from the file/stream."));
|
|
||||||
do_import(bytes);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
DataFormat::Hex => {
|
|
||||||
for line in BufReader::new(instream).lines() {
|
|
||||||
let s = line.unwrap_or_else(|_| die!("Error reading from the file/stream."));
|
|
||||||
let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
|
|
||||||
first_read = 0;
|
|
||||||
let bytes = FromHex::from_hex(&(s[..])).unwrap_or_else(|_| die!("Invalid hex in file/stream."));
|
|
||||||
do_import(bytes);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
while !client.queue_info().is_empty() {
|
|
||||||
sleep(Duration::from_secs(1));
|
|
||||||
informant.tick();
|
|
||||||
}
|
|
||||||
client.flush_queue();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn execute_signer(conf: Configuration) {
|
|
||||||
if !conf.args.cmd_new_token {
|
|
||||||
die!("Unknown command.");
|
|
||||||
}
|
|
||||||
|
|
||||||
let path = conf.directories().signer;
|
|
||||||
let code = new_token(path).unwrap_or_else(|e| {
|
|
||||||
die!("Error generating token: {:?}", e)
|
|
||||||
});
|
|
||||||
println!("This key code will authorise your System Signer UI: {}", if conf.args.flag_no_color { code } else { format!("{}", Colour::White.bold().paint(code)) });
|
|
||||||
}
|
|
||||||
|
|
||||||
fn execute_account_cli(conf: Configuration) {
|
|
||||||
use ethcore::ethstore::{EthStore, import_accounts};
|
|
||||||
use ethcore::ethstore::dir::DiskDirectory;
|
|
||||||
use ethcore::account_provider::AccountProvider;
|
|
||||||
use rpassword::read_password;
|
|
||||||
|
|
||||||
let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap());
|
|
||||||
let iterations = conf.keys_iterations();
|
|
||||||
let secret_store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap()));
|
|
||||||
|
|
||||||
if conf.args.cmd_new {
|
|
||||||
println!("Please note that password is NOT RECOVERABLE.");
|
|
||||||
print!("Type password: ");
|
|
||||||
flush_stdout();
|
|
||||||
let password = read_password().unwrap();
|
|
||||||
print!("Repeat password: ");
|
|
||||||
flush_stdout();
|
|
||||||
let password_repeat = read_password().unwrap();
|
|
||||||
if password != password_repeat {
|
|
||||||
println!("Passwords do not match!");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
println!("New account address:");
|
|
||||||
let new_address = secret_store.new_account(&password).unwrap();
|
|
||||||
println!("{:?}", new_address);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.args.cmd_list {
|
|
||||||
println!("Known addresses:");
|
|
||||||
for addr in &secret_store.accounts() {
|
|
||||||
println!("{:?}", addr);
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.args.cmd_import {
|
|
||||||
let to = DiskDirectory::create(conf.keys_path()).unwrap();
|
|
||||||
let mut imported = 0;
|
|
||||||
for path in &conf.args.arg_path {
|
|
||||||
let from = DiskDirectory::at(path);
|
|
||||||
imported += import_accounts(&from, &to).unwrap_or_else(|e| die!("Could not import accounts {}", e)).len();
|
|
||||||
}
|
|
||||||
println!("Imported {} keys", imported);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn execute_wallet_cli(conf: Configuration) {
|
|
||||||
use ethcore::ethstore::{PresaleWallet, EthStore};
|
|
||||||
use ethcore::ethstore::dir::DiskDirectory;
|
|
||||||
use ethcore::account_provider::AccountProvider;
|
|
||||||
|
|
||||||
let wallet_path = conf.args.arg_path.first().unwrap();
|
|
||||||
let filename = conf.args.flag_password.first().unwrap();
|
|
||||||
let mut file = File::open(filename).unwrap_or_else(|_| die!("{} Unable to read password file.", filename));
|
|
||||||
let mut file_content = String::new();
|
|
||||||
file.read_to_string(&mut file_content).unwrap_or_else(|_| die!("{} Unable to read password file.", filename));
|
|
||||||
|
|
||||||
let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap());
|
|
||||||
let iterations = conf.keys_iterations();
|
|
||||||
let store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap()));
|
|
||||||
|
|
||||||
// remove eof
|
|
||||||
let pass = &file_content[..file_content.len() - 1];
|
|
||||||
let wallet = PresaleWallet::open(wallet_path).unwrap_or_else(|_| die!("Unable to open presale wallet."));
|
|
||||||
let kp = wallet.decrypt(pass).unwrap_or_else(|_| die!("Invalid password"));
|
|
||||||
let address = store.insert_account(kp.secret().clone(), pass).unwrap();
|
|
||||||
|
|
||||||
println!("Imported account: {}", address);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wait_for_exit(
|
|
||||||
panic_handler: Arc<PanicHandler>,
|
|
||||||
_rpc_server: Option<RpcServer>,
|
|
||||||
_dapps_server: Option<WebappServer>,
|
|
||||||
_signer_server: Option<SignerServer>
|
|
||||||
) {
|
|
||||||
let exit = Arc::new(Condvar::new());
|
|
||||||
|
|
||||||
// Handle possible exits
|
|
||||||
let e = exit.clone();
|
|
||||||
CtrlC::set_handler(move || { e.notify_all(); });
|
|
||||||
|
|
||||||
// Handle panics
|
|
||||||
let e = exit.clone();
|
|
||||||
panic_handler.on_panic(move |_reason| { e.notify_all(); });
|
|
||||||
|
|
||||||
// Wait for signal
|
|
||||||
let mutex = Mutex::new(());
|
|
||||||
exit.wait(&mut mutex.lock());
|
|
||||||
info!("Finishing work, please wait...");
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parity needs at least 1 test to generate coverage reports correctly.
#[test]
fn if_works() {}
|
|
||||||
|
@ -23,8 +23,7 @@ use ethsync::{SyncConfig, NetworkConfiguration};
|
|||||||
use self::no_ipc_deps::*;
|
use self::no_ipc_deps::*;
|
||||||
#[cfg(feature="ipc")]
|
#[cfg(feature="ipc")]
|
||||||
use self::ipc_deps::*;
|
use self::ipc_deps::*;
|
||||||
|
use ethcore_logger::Config as LogConfig;
|
||||||
use ethcore_logger::Settings as LogSettings;
|
|
||||||
|
|
||||||
#[cfg(not(feature="ipc"))]
|
#[cfg(not(feature="ipc"))]
|
||||||
mod no_ipc_deps {
|
mod no_ipc_deps {
|
||||||
@ -64,7 +63,7 @@ pub fn hypervisor() -> Option<Hypervisor> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature="ipc")]
|
#[cfg(feature="ipc")]
|
||||||
fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogSettings) -> BootArgs {
|
fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs {
|
||||||
let service_config = ServiceConfiguration {
|
let service_config = ServiceConfiguration {
|
||||||
sync: sync_cfg,
|
sync: sync_cfg,
|
||||||
net: net_cfg,
|
net: net_cfg,
|
||||||
@ -96,7 +95,7 @@ pub fn sync
|
|||||||
sync_cfg: SyncConfig,
|
sync_cfg: SyncConfig,
|
||||||
net_cfg: NetworkConfiguration,
|
net_cfg: NetworkConfiguration,
|
||||||
_client: Arc<BlockChainClient>,
|
_client: Arc<BlockChainClient>,
|
||||||
log_settings: &LogSettings,
|
log_settings: &LogConfig,
|
||||||
)
|
)
|
||||||
-> Result<SyncModules, ethcore::error::Error>
|
-> Result<SyncModules, ethcore::error::Error>
|
||||||
{
|
{
|
||||||
@ -121,7 +120,7 @@ pub fn sync
|
|||||||
sync_cfg: SyncConfig,
|
sync_cfg: SyncConfig,
|
||||||
net_cfg: NetworkConfiguration,
|
net_cfg: NetworkConfiguration,
|
||||||
client: Arc<BlockChainClient>,
|
client: Arc<BlockChainClient>,
|
||||||
_log_settings: &LogSettings,
|
_log_settings: &LogConfig,
|
||||||
)
|
)
|
||||||
-> Result<SyncModules, ethcore::error::Error>
|
-> Result<SyncModules, ethcore::error::Error>
|
||||||
{
|
{
|
||||||
|
276
parity/params.rs
Normal file
276
parity/params.rs
Normal file
@ -0,0 +1,276 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::str::FromStr;
|
||||||
|
use std::time::Duration;
|
||||||
|
use util::{contents, DatabaseConfig, journaldb, H256, Address, U256, version_data};
|
||||||
|
use util::journaldb::Algorithm;
|
||||||
|
use ethcore::spec::Spec;
|
||||||
|
use ethcore::ethereum;
|
||||||
|
use ethcore::miner::{GasPricer, GasPriceCalibratorOptions};
|
||||||
|
use dir::Directories;
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum SpecType {
|
||||||
|
Mainnet,
|
||||||
|
Testnet,
|
||||||
|
Olympic,
|
||||||
|
Classic,
|
||||||
|
Custom(String),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for SpecType {
|
||||||
|
fn default() -> Self {
|
||||||
|
SpecType::Mainnet
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for SpecType {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let spec = match s {
|
||||||
|
"frontier" | "homestead" | "mainnet" => SpecType::Mainnet,
|
||||||
|
"frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic,
|
||||||
|
"morden" | "testnet" => SpecType::Testnet,
|
||||||
|
"olympic" => SpecType::Olympic,
|
||||||
|
other => SpecType::Custom(other.into()),
|
||||||
|
};
|
||||||
|
Ok(spec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SpecType {
|
||||||
|
pub fn spec(&self) -> Result<Spec, String> {
|
||||||
|
match *self {
|
||||||
|
SpecType::Mainnet => Ok(ethereum::new_frontier()),
|
||||||
|
SpecType::Testnet => Ok(ethereum::new_morden()),
|
||||||
|
SpecType::Olympic => Ok(ethereum::new_olympic()),
|
||||||
|
SpecType::Classic => Ok(ethereum::new_classic()),
|
||||||
|
SpecType::Custom(ref file) => Ok(Spec::load(&try!(contents(file).map_err(|_| "Could not load specification file."))))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum Pruning {
|
||||||
|
Specific(Algorithm),
|
||||||
|
Auto,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Pruning {
|
||||||
|
fn default() -> Self {
|
||||||
|
Pruning::Auto
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for Pruning {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"auto" => Ok(Pruning::Auto),
|
||||||
|
other => other.parse().map(Pruning::Specific),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Pruning {
|
||||||
|
pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
|
||||||
|
match *self {
|
||||||
|
Pruning::Specific(algo) => algo,
|
||||||
|
Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
|
||||||
|
let mut algo_types = Algorithm::all_types();
|
||||||
|
|
||||||
|
// if all dbs have the same latest era, the last element is the default one
|
||||||
|
algo_types.push(Algorithm::default());
|
||||||
|
|
||||||
|
algo_types.into_iter().max_by_key(|i| {
|
||||||
|
let mut client_path = dirs.client_path(genesis_hash, fork_name, *i);
|
||||||
|
client_path.push("state");
|
||||||
|
let db = journaldb::new(client_path.to_str().unwrap(), *i, DatabaseConfig::default());
|
||||||
|
trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
|
||||||
|
db.latest_era()
|
||||||
|
}).unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Which transaction origins trigger block re-sealing by the miner.
#[derive(Debug, PartialEq)]
pub struct ResealPolicy {
	/// Reseal when our own transactions arrive.
	pub own: bool,
	/// Reseal when external transactions arrive.
	pub external: bool,
}

impl Default for ResealPolicy {
	fn default() -> Self {
		ResealPolicy { own: true, external: true }
	}
}

impl FromStr for ResealPolicy {
	type Err = String;

	/// Parses the `--reseal-on-txs` style value: none / own / ext / all.
	fn from_str(s: &str) -> Result<Self, Self::Err> {
		match s {
			"none" => Ok(ResealPolicy { own: false, external: false }),
			"own" => Ok(ResealPolicy { own: true, external: false }),
			"ext" => Ok(ResealPolicy { own: false, external: true }),
			"all" => Ok(ResealPolicy { own: true, external: true }),
			x => Err(format!("Invalid reseal value: {}", x)),
		}
	}
}
|
||||||
|
|
||||||
|
/// Keystore / account-related command-line settings.
#[derive(Debug, PartialEq)]
pub struct AccountsConfig {
	/// Key-derivation iteration count used by the keystore.
	pub iterations: u32,
	/// Whether keys should be imported on startup.
	/// NOTE(review): exact import source is defined by the consumer — confirm.
	pub import_keys: bool,
	/// True when running against the test network.
	pub testnet: bool,
	/// Files holding account-unlocking passwords.
	pub password_files: Vec<String>,
	/// Accounts to keep unlocked.
	pub unlocked_accounts: Vec<Address>,
}

impl Default for AccountsConfig {
	fn default() -> Self {
		AccountsConfig {
			// 10240 matches the historical keystore default iteration count.
			iterations: 10240,
			import_keys: true,
			testnet: false,
			password_files: Vec::new(),
			unlocked_accounts: Vec::new(),
		}
	}
}
|
||||||
|
|
||||||
|
/// How the miner's minimal gas price is determined.
#[derive(Debug, PartialEq)]
pub enum GasPricerConfig {
	/// A fixed, user-supplied gas price.
	Fixed(U256),
	/// Gas price derived from a target USD cost per transaction,
	/// periodically recalibrated.
	Calibrated {
		usd_per_tx: f32,
		recalibration_period: Duration,
	}
}

impl Default for GasPricerConfig {
	fn default() -> Self {
		// Default: calibrate towards $0.005 per transaction, hourly.
		GasPricerConfig::Calibrated {
			usd_per_tx: 0.005,
			recalibration_period: Duration::from_secs(3600),
		}
	}
}

impl Into<GasPricer> for GasPricerConfig {
	/// Converts the CLI-level config into the miner's `GasPricer`.
	fn into(self) -> GasPricer {
		match self {
			GasPricerConfig::Fixed(u) => GasPricer::Fixed(u),
			GasPricerConfig::Calibrated { usd_per_tx, recalibration_period } => {
				GasPricer::new_calibrated(GasPriceCalibratorOptions {
					usd_per_tx: usd_per_tx,
					recalibration_period: recalibration_period,
				})
			}
		}
	}
}
|
||||||
|
|
||||||
|
/// Miscellaneous mining parameters passed to the miner.
#[derive(Debug, PartialEq)]
pub struct MinerExtras {
	/// Address receiving block rewards.
	pub author: Address,
	/// Extra data embedded in sealed blocks.
	pub extra_data: Vec<u8>,
	/// Lower bound the miner targets for the block gas limit.
	pub gas_floor_target: U256,
	/// Upper bound the miner targets for the block gas limit.
	pub gas_ceil_target: U256,
	/// Maximum number of transactions kept in the transaction queue.
	pub transactions_limit: usize,
}

impl Default for MinerExtras {
	fn default() -> Self {
		MinerExtras {
			author: Default::default(),
			// Default extra data identifies the client version.
			extra_data: version_data(),
			gas_floor_target: U256::from(4_700_000),
			gas_ceil_target: U256::from(6_283_184),
			transactions_limit: 1024,
		}
	}
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
	use util::journaldb::Algorithm;
	use super::{SpecType, Pruning, ResealPolicy};

	#[test]
	fn test_spec_type_parsing() {
		// Every alias of a network resolves to the same variant.
		for s in &["frontier", "homestead", "mainnet"] {
			assert_eq!(SpecType::Mainnet, s.parse().unwrap());
		}
		for s in &["testnet", "morden"] {
			assert_eq!(SpecType::Testnet, s.parse().unwrap());
		}
		assert_eq!(SpecType::Olympic, "olympic".parse().unwrap());
	}

	#[test]
	fn test_spec_type_default() {
		assert_eq!(SpecType::Mainnet, SpecType::default());
	}

	#[test]
	fn test_pruning_parsing() {
		assert_eq!(Pruning::Auto, "auto".parse().unwrap());

		// Named pruning levels map onto concrete journaldb algorithms.
		let cases = [
			("archive", Algorithm::Archive),
			("light", Algorithm::EarlyMerge),
			("fast", Algorithm::OverlayRecent),
			("basic", Algorithm::RefCounted),
		];
		for &(name, algo) in &cases {
			assert_eq!(Pruning::Specific(algo), name.parse().unwrap());
		}
	}

	#[test]
	fn test_pruning_default() {
		assert_eq!(Pruning::Auto, Pruning::default());
	}

	#[test]
	fn test_reseal_policy_parsing() {
		let policy = |own, external| ResealPolicy { own: own, external: external };
		assert_eq!(policy(false, false), "none".parse().unwrap());
		assert_eq!(policy(true, false), "own".parse().unwrap());
		assert_eq!(policy(false, true), "ext".parse().unwrap());
		assert_eq!(policy(true, true), "all".parse().unwrap());
	}

	#[test]
	fn test_reseal_policy_default() {
		assert_eq!(ResealPolicy { own: true, external: true }, ResealPolicy::default());
	}
}
|
43
parity/presale.rs
Normal file
43
parity/presale.rs
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use ethcore::ethstore::{PresaleWallet, EthStore};
|
||||||
|
use ethcore::ethstore::dir::DiskDirectory;
|
||||||
|
use ethcore::account_provider::AccountProvider;
|
||||||
|
use helpers::{password_prompt, password_from_file};
|
||||||
|
|
||||||
|
/// Parameters for the presale-wallet import command.
#[derive(Debug, PartialEq)]
pub struct ImportWallet {
	/// Key-derivation iteration count for the target keystore.
	pub iterations: u32,
	/// Keystore directory path.
	pub path: String,
	/// Path to the presale wallet file.
	pub wallet_path: String,
	/// Optional file holding the wallet password; when absent the
	/// password is prompted for interactively.
	pub password_file: Option<String>,
}
|
||||||
|
|
||||||
|
pub fn execute(cmd: ImportWallet) -> Result<String, String> {
|
||||||
|
let password: String = match cmd.password_file {
|
||||||
|
Some(file) => try!(password_from_file(file)),
|
||||||
|
None => try!(password_prompt()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let dir = Box::new(DiskDirectory::create(cmd.path).unwrap());
|
||||||
|
let secret_store = Box::new(EthStore::open_with_iterations(dir, cmd.iterations).unwrap());
|
||||||
|
let acc_provider = AccountProvider::new(secret_store);
|
||||||
|
let wallet = try!(PresaleWallet::open(cmd.wallet_path).map_err(|_| "Unable to open presale wallet."));
|
||||||
|
let kp = try!(wallet.decrypt(&password).map_err(|_| "Invalid password."));
|
||||||
|
let address = acc_provider.insert_account(kp.secret().clone(), &password).unwrap();
|
||||||
|
Ok(format!("{:?}", address))
|
||||||
|
}
|
@ -14,40 +14,64 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::fmt;
|
||||||
use std::str::FromStr;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use util::panics::PanicHandler;
|
use util::panics::PanicHandler;
|
||||||
use die::*;
|
use ethcore_rpc::{RpcServerError, RpcServer as Server};
|
||||||
use jsonipc;
|
use jsonipc;
|
||||||
use rpc_apis;
|
use rpc_apis;
|
||||||
use std::fmt;
|
use rpc_apis::ApiSet;
|
||||||
|
use helpers::parity_ipc_path;
|
||||||
|
|
||||||
pub use ethcore_rpc::Server as RpcServer;
|
pub use jsonipc::Server as IpcServer;
|
||||||
use ethcore_rpc::{RpcServerError, RpcServer as Server};
|
pub use ethcore_rpc::Server as HttpServer;
|
||||||
|
|
||||||
|
/// Settings for the HTTP JSON-RPC server.
#[derive(Debug, PartialEq)]
pub struct HttpConfiguration {
	pub enabled: bool,
	pub interface: String,
	pub port: u16,
	/// Which RPC API groups are exposed.
	pub apis: ApiSet,
	/// Allowed CORS domains; `None` disables the CORS header.
	pub cors: Option<Vec<String>>,
	/// Allowed `Host` header values; `None` allows any host.
	pub hosts: Option<Vec<String>>,
}

impl Default for HttpConfiguration {
	fn default() -> Self {
		// Enabled on localhost:8545 with the unsafe-context API set.
		HttpConfiguration {
			enabled: true,
			interface: "127.0.0.1".into(),
			port: 8545,
			apis: ApiSet::UnsafeContext,
			cors: None,
			hosts: Some(Vec::new()),
		}
	}
}
|
||||||
|
|
||||||
|
/// Settings for the IPC JSON-RPC server.
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
	pub enabled: bool,
	/// Filesystem path (or address) of the IPC socket.
	pub socket_addr: String,
	/// Which RPC API groups are exposed.
	pub apis: ApiSet,
}

impl Default for IpcConfiguration {
	fn default() -> Self {
		IpcConfiguration {
			enabled: true,
			// `$HOME` is expanded by `parity_ipc_path`.
			socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"),
			apis: ApiSet::UnsafeContext,
		}
	}
}
|
||||||
|
|
||||||
/// Human-readable summary of the IPC configuration, used in startup logging.
impl fmt::Display for IpcConfiguration {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		if self.enabled {
			write!(f, "endpoint address [{}], api list [{:?}]", self.socket_addr, self.apis)
		} else {
			write!(f, "disabled")
		}
	}
}
|
||||||
@ -58,22 +82,19 @@ pub struct Dependencies {
|
|||||||
pub apis: Arc<rpc_apis::Dependencies>,
|
pub apis: Arc<rpc_apis::Dependencies>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Starts the HTTP JSON-RPC server when enabled; returns `Ok(None)` when
/// disabled, or an error string on an invalid listen address or startup failure.
pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result<Option<HttpServer>, String> {
	if !conf.enabled {
		return Ok(None);
	}

	let url = format!("{}:{}", conf.interface, conf.port);
	let addr = try!(url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url)));
	Ok(Some(try!(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis))))
}
|
||||||
|
|
||||||
/// Builds an RPC server instance with the requested API set wired in.
fn setup_rpc_server(apis: ApiSet, deps: &Dependencies) -> Result<Server, String> {
	let server = Server::new();
	Ok(rpc_apis::setup_rpc(server, deps.apis.clone(), apis))
}
|
||||||
|
|
||||||
/// Binds the RPC server to an HTTP listen address, translating server
/// startup failures into error strings for the caller.
pub fn setup_http_rpc_server(
	dependencies: &Dependencies,
	url: &SocketAddr,
	cors_domains: Option<Vec<String>>,
	allowed_hosts: Option<Vec<String>>,
	apis: ApiSet
) -> Result<HttpServer, String> {
	let server = try!(setup_rpc_server(apis, dependencies));
	let ph = dependencies.panic_handler.clone();
	let start_result = server.start_http(url, cors_domains, allowed_hosts, ph);
	match start_result {
		Err(RpcServerError::IoError(err)) => Err(format!("RPC io error: {}", err)),
		Err(e) => Err(format!("RPC error: {:?}", e)),
		Ok(server) => Ok(server),
	}
}
|
||||||
|
|
||||||
pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Option<jsonipc::Server> {
|
pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result<Option<IpcServer>, String> {
|
||||||
if !conf.enabled { return None; }
|
if !conf.enabled { return Ok(None); }
|
||||||
let apis = conf.apis.split(',').collect();
|
Ok(Some(try!(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis))))
|
||||||
Some(setup_ipc_rpc_server(deps, &conf.socket_addr, apis))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: Vec<&str>) -> jsonipc::Server {
|
pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result<IpcServer, String> {
|
||||||
let server = setup_rpc_server(apis, dependencies);
|
let server = try!(setup_rpc_server(apis, dependencies));
|
||||||
match server.start_ipc(addr) {
|
match server.start_ipc(addr) {
|
||||||
Err(jsonipc::Error::Io(io_error)) => die_with_io_error("RPC", io_error),
|
Err(jsonipc::Error::Io(io_error)) => Err(format!("RPC io error: {}", io_error)),
|
||||||
Err(any_error) => die!("RPC: {:?}", any_error),
|
Err(any_error) => Err(format!("Rpc error: {:?}", any_error)),
|
||||||
Ok(server) => server
|
Ok(server) => Ok(server)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -15,20 +15,21 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
|
use std::collections::HashSet;
|
||||||
|
use std::cmp::PartialEq;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use util::RotatingLogger;
|
||||||
use ethsync::{ManageNetwork, SyncProvider};
|
use util::network_settings::NetworkSettings;
|
||||||
use ethcore::miner::{Miner, ExternalMiner};
|
use ethcore::miner::{Miner, ExternalMiner};
|
||||||
use ethcore::client::Client;
|
use ethcore::client::Client;
|
||||||
use util::RotatingLogger;
|
|
||||||
use ethcore::account_provider::AccountProvider;
|
use ethcore::account_provider::AccountProvider;
|
||||||
use util::network_settings::NetworkSettings;
|
use ethsync::{ManageNetwork, SyncProvider};
|
||||||
|
use ethcore_rpc::Extendable;
|
||||||
pub use ethcore_rpc::ConfirmationsQueue;
|
pub use ethcore_rpc::ConfirmationsQueue;
|
||||||
|
|
||||||
use ethcore_rpc::Extendable;
|
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq, Clone, Eq, Hash)]
|
||||||
pub enum Api {
|
pub enum Api {
|
||||||
Web3,
|
Web3,
|
||||||
Net,
|
Net,
|
||||||
@ -41,18 +42,8 @@ pub enum Api {
|
|||||||
Rpc,
|
Rpc,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub enum ApiError {
|
|
||||||
UnknownApi(String)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub enum ApiSet {
|
|
||||||
SafeContext,
|
|
||||||
UnsafeContext,
|
|
||||||
List(Vec<Api>),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromStr for Api {
|
impl FromStr for Api {
|
||||||
type Err = ApiError;
|
type Err = String;
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
use self::Api::*;
|
use self::Api::*;
|
||||||
@ -67,11 +58,41 @@ impl FromStr for Api {
|
|||||||
"ethcore_set" => Ok(EthcoreSet),
|
"ethcore_set" => Ok(EthcoreSet),
|
||||||
"traces" => Ok(Traces),
|
"traces" => Ok(Traces),
|
||||||
"rpc" => Ok(Rpc),
|
"rpc" => Ok(Rpc),
|
||||||
e => Err(ApiError::UnknownApi(e.into())),
|
api => Err(format!("Unknown api: {}", api))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum ApiSet {
|
||||||
|
SafeContext,
|
||||||
|
UnsafeContext,
|
||||||
|
List(HashSet<Api>),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for ApiSet {
|
||||||
|
fn default() -> Self {
|
||||||
|
ApiSet::UnsafeContext
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PartialEq for ApiSet {
|
||||||
|
fn eq(&self, other: &Self) -> bool {
|
||||||
|
self.list_apis() == other.list_apis()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ApiSet {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
s.split(',')
|
||||||
|
.map(Api::from_str)
|
||||||
|
.collect::<Result<_, _>>()
|
||||||
|
.map(ApiSet::List)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub struct Dependencies {
|
pub struct Dependencies {
|
||||||
pub signer_port: Option<u16>,
|
pub signer_port: Option<u16>,
|
||||||
pub signer_queue: Arc<ConfirmationsQueue>,
|
pub signer_queue: Arc<ConfirmationsQueue>,
|
||||||
@ -106,31 +127,27 @@ fn to_modules(apis: &[Api]) -> BTreeMap<String, String> {
|
|||||||
modules
|
modules
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn from_str(apis: Vec<&str>) -> Vec<Api> {
|
impl ApiSet {
|
||||||
apis.into_iter()
|
pub fn list_apis(&self) -> HashSet<Api> {
|
||||||
.map(Api::from_str)
|
match *self {
|
||||||
.collect::<Result<Vec<Api>, ApiError>>()
|
ApiSet::List(ref apis) => apis.clone(),
|
||||||
.unwrap_or_else(|e| match e {
|
ApiSet::UnsafeContext => {
|
||||||
ApiError::UnknownApi(s) => die!("Unknown RPC API specified: {}", s),
|
vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc]
|
||||||
})
|
.into_iter().collect()
|
||||||
}
|
},
|
||||||
|
_ => {
|
||||||
fn list_apis(apis: ApiSet) -> Vec<Api> {
|
vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc]
|
||||||
match apis {
|
.into_iter().collect()
|
||||||
ApiSet::List(apis) => apis,
|
},
|
||||||
ApiSet::UnsafeContext => {
|
}
|
||||||
vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc]
|
|
||||||
},
|
|
||||||
_ => {
|
|
||||||
vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc]
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet) -> T {
|
pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet) -> T {
|
||||||
use ethcore_rpc::v1::*;
|
use ethcore_rpc::v1::*;
|
||||||
|
|
||||||
let apis = list_apis(apis);
|
// it's turned into vector, cause ont of the cases requires &[]
|
||||||
|
let apis = apis.list_apis().into_iter().collect::<Vec<_>>();
|
||||||
for api in &apis {
|
for api in &apis {
|
||||||
match *api {
|
match *api {
|
||||||
Api::Web3 => {
|
Api::Web3 => {
|
||||||
@ -140,8 +157,18 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
|
|||||||
server.add_delegate(NetClient::new(&deps.sync).to_delegate());
|
server.add_delegate(NetClient::new(&deps.sync).to_delegate());
|
||||||
},
|
},
|
||||||
Api::Eth => {
|
Api::Eth => {
|
||||||
server.add_delegate(EthClient::new(&deps.client, &deps.sync, &deps.secret_store, &deps.miner, &deps.external_miner, deps.allow_pending_receipt_query).to_delegate());
|
let client = EthClient::new(
|
||||||
server.add_delegate(EthFilterClient::new(&deps.client, &deps.miner).to_delegate());
|
&deps.client,
|
||||||
|
&deps.sync,
|
||||||
|
&deps.secret_store,
|
||||||
|
&deps.miner,
|
||||||
|
&deps.external_miner,
|
||||||
|
deps.allow_pending_receipt_query
|
||||||
|
);
|
||||||
|
server.add_delegate(client.to_delegate());
|
||||||
|
|
||||||
|
let filter_client = EthFilterClient::new(&deps.client, &deps.miner);
|
||||||
|
server.add_delegate(filter_client.to_delegate());
|
||||||
|
|
||||||
if deps.signer_port.is_some() {
|
if deps.signer_port.is_some() {
|
||||||
server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate());
|
server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate());
|
||||||
@ -173,3 +200,46 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
|
|||||||
}
|
}
|
||||||
server
|
server
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::{Api, ApiSet};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_api_parsing() {
|
||||||
|
assert_eq!(Api::Web3, "web3".parse().unwrap());
|
||||||
|
assert_eq!(Api::Net, "net".parse().unwrap());
|
||||||
|
assert_eq!(Api::Eth, "eth".parse().unwrap());
|
||||||
|
assert_eq!(Api::Personal, "personal".parse().unwrap());
|
||||||
|
assert_eq!(Api::Signer, "signer".parse().unwrap());
|
||||||
|
assert_eq!(Api::Ethcore, "ethcore".parse().unwrap());
|
||||||
|
assert_eq!(Api::EthcoreSet, "ethcore_set".parse().unwrap());
|
||||||
|
assert_eq!(Api::Traces, "traces".parse().unwrap());
|
||||||
|
assert_eq!(Api::Rpc, "rpc".parse().unwrap());
|
||||||
|
assert!("rp".parse::<Api>().is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_api_set_default() {
|
||||||
|
assert_eq!(ApiSet::UnsafeContext, ApiSet::default());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_api_set_parsing() {
|
||||||
|
assert_eq!(ApiSet::List(vec![Api::Web3, Api::Eth].into_iter().collect()), "web3,eth".parse().unwrap());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_api_set_unsafe_context() {
|
||||||
|
let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc]
|
||||||
|
.into_iter().collect();
|
||||||
|
assert_eq!(ApiSet::UnsafeContext.list_apis(), expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_api_set_safe_context() {
|
||||||
|
let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc]
|
||||||
|
.into_iter().collect();
|
||||||
|
assert_eq!(ApiSet::SafeContext.list_apis(), expected);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
329
parity/run.rs
Normal file
329
parity/run.rs
Normal file
@ -0,0 +1,329 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::sync::{Arc, Mutex, Condvar};
|
||||||
|
use std::path::Path;
|
||||||
|
use ctrlc::CtrlC;
|
||||||
|
use fdlimit::raise_fd_limit;
|
||||||
|
use ethcore_logger::{Config as LogConfig, setup_log};
|
||||||
|
use util::network_settings::NetworkSettings;
|
||||||
|
use util::{Colour, version, NetworkConfiguration, U256};
|
||||||
|
use util::panics::{MayPanic, ForwardPanic, PanicHandler};
|
||||||
|
use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType};
|
||||||
|
use ethcore::service::ClientService;
|
||||||
|
use ethcore::account_provider::AccountProvider;
|
||||||
|
use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions};
|
||||||
|
use ethsync::SyncConfig;
|
||||||
|
use informant::Informant;
|
||||||
|
|
||||||
|
use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration};
|
||||||
|
use signer::SignerServer;
|
||||||
|
use dapps::WebappServer;
|
||||||
|
use io_handler::ClientIoHandler;
|
||||||
|
use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras};
|
||||||
|
use helpers::{to_client_config, execute_upgrades, passwords_from_files};
|
||||||
|
use dir::Directories;
|
||||||
|
use cache::CacheConfig;
|
||||||
|
use dapps;
|
||||||
|
use signer;
|
||||||
|
use modules;
|
||||||
|
use rpc_apis;
|
||||||
|
use rpc;
|
||||||
|
use url;
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct RunCmd {
|
||||||
|
pub cache_config: CacheConfig,
|
||||||
|
pub dirs: Directories,
|
||||||
|
pub spec: SpecType,
|
||||||
|
pub pruning: Pruning,
|
||||||
|
/// Some if execution should be daemonized. Contains pid_file path.
|
||||||
|
pub daemon: Option<String>,
|
||||||
|
pub logger_config: LogConfig,
|
||||||
|
pub miner_options: MinerOptions,
|
||||||
|
pub http_conf: HttpConfiguration,
|
||||||
|
pub ipc_conf: IpcConfiguration,
|
||||||
|
pub net_conf: NetworkConfiguration,
|
||||||
|
pub network_id: Option<U256>,
|
||||||
|
pub acc_conf: AccountsConfig,
|
||||||
|
pub gas_pricer: GasPricerConfig,
|
||||||
|
pub miner_extras: MinerExtras,
|
||||||
|
pub mode: Mode,
|
||||||
|
pub tracing: Switch,
|
||||||
|
pub compaction: DatabaseCompactionProfile,
|
||||||
|
pub vm_type: VMType,
|
||||||
|
pub enable_network: bool,
|
||||||
|
pub geth_compatibility: bool,
|
||||||
|
pub signer_port: Option<u16>,
|
||||||
|
pub net_settings: NetworkSettings,
|
||||||
|
pub dapps_conf: dapps::Configuration,
|
||||||
|
pub signer_conf: signer::Configuration,
|
||||||
|
pub ui: bool,
|
||||||
|
pub name: String,
|
||||||
|
pub custom_bootnodes: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn execute(cmd: RunCmd) -> Result<(), String> {
|
||||||
|
// create supervisor
|
||||||
|
let mut hypervisor = modules::hypervisor();
|
||||||
|
|
||||||
|
// increase max number of open files
|
||||||
|
raise_fd_limit();
|
||||||
|
|
||||||
|
// set up logger
|
||||||
|
let logger = try!(setup_log(&cmd.logger_config));
|
||||||
|
|
||||||
|
// set up panic handler
|
||||||
|
let panic_handler = PanicHandler::new_in_arc();
|
||||||
|
|
||||||
|
// create dirs used by parity
|
||||||
|
try!(cmd.dirs.create_dirs());
|
||||||
|
|
||||||
|
// load spec
|
||||||
|
let spec = try!(cmd.spec.spec());
|
||||||
|
let fork_name = spec.fork_name.clone();
|
||||||
|
|
||||||
|
// load genesis hash
|
||||||
|
let genesis_hash = spec.genesis_header().hash();
|
||||||
|
|
||||||
|
// select pruning algorithm
|
||||||
|
let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref());
|
||||||
|
|
||||||
|
// prepare client_path
|
||||||
|
let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm);
|
||||||
|
|
||||||
|
// execute upgrades
|
||||||
|
try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm));
|
||||||
|
|
||||||
|
// run in daemon mode
|
||||||
|
if let Some(pid_file) = cmd.daemon {
|
||||||
|
try!(daemonize(pid_file));
|
||||||
|
}
|
||||||
|
|
||||||
|
// display info about used pruning algorithm
|
||||||
|
info!("Starting {}", Colour::White.bold().paint(version()));
|
||||||
|
info!("Using state DB journalling strategy {}", Colour::White.bold().paint(algorithm.as_str()));
|
||||||
|
|
||||||
|
// display warning about using experimental journaldb alorithm
|
||||||
|
if !algorithm.is_stable() {
|
||||||
|
warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// create sync config
|
||||||
|
let mut sync_config = SyncConfig::default();
|
||||||
|
sync_config.network_id = match cmd.network_id {
|
||||||
|
Some(id) => id,
|
||||||
|
None => spec.network_id(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// prepare account provider
|
||||||
|
let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf)));
|
||||||
|
|
||||||
|
// create miner
|
||||||
|
let miner = Miner::new(cmd.miner_options, cmd.gas_pricer.into(), spec, Some(account_provider.clone()));
|
||||||
|
miner.set_author(cmd.miner_extras.author);
|
||||||
|
miner.set_gas_floor_target(cmd.miner_extras.gas_floor_target);
|
||||||
|
miner.set_gas_ceil_target(cmd.miner_extras.gas_ceil_target);
|
||||||
|
miner.set_extra_data(cmd.miner_extras.extra_data);
|
||||||
|
miner.set_transactions_limit(cmd.miner_extras.transactions_limit);
|
||||||
|
|
||||||
|
// create client config
|
||||||
|
let client_config = to_client_config(
|
||||||
|
&cmd.cache_config,
|
||||||
|
&cmd.dirs,
|
||||||
|
genesis_hash,
|
||||||
|
cmd.mode,
|
||||||
|
cmd.tracing,
|
||||||
|
cmd.pruning,
|
||||||
|
cmd.compaction,
|
||||||
|
cmd.vm_type,
|
||||||
|
cmd.name,
|
||||||
|
fork_name.as_ref(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// load spec
|
||||||
|
// TODO: make it clonable and load it only once!
|
||||||
|
let spec = try!(cmd.spec.spec());
|
||||||
|
|
||||||
|
// set up bootnodes
|
||||||
|
let mut net_conf = cmd.net_conf;
|
||||||
|
if !cmd.custom_bootnodes {
|
||||||
|
net_conf.boot_nodes = spec.nodes.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
// create client
|
||||||
|
let service = try!(ClientService::start(
|
||||||
|
client_config,
|
||||||
|
spec,
|
||||||
|
Path::new(&client_path),
|
||||||
|
miner.clone(),
|
||||||
|
).map_err(|e| format!("Client service error: {:?}", e)));
|
||||||
|
|
||||||
|
// forward panics from service
|
||||||
|
panic_handler.forward_from(&service);
|
||||||
|
|
||||||
|
// take handle to client
|
||||||
|
let client = service.client();
|
||||||
|
|
||||||
|
// create external miner
|
||||||
|
let external_miner = Arc::new(ExternalMiner::default());
|
||||||
|
|
||||||
|
// create sync object
|
||||||
|
let (sync_provider, manage_network, chain_notify) = try!(modules::sync(
|
||||||
|
&mut hypervisor, sync_config, net_conf.into(), client.clone(), &cmd.logger_config,
|
||||||
|
).map_err(|e| format!("Sync error: {}", e)));
|
||||||
|
|
||||||
|
service.add_notify(chain_notify.clone());
|
||||||
|
|
||||||
|
// start network
|
||||||
|
if cmd.enable_network {
|
||||||
|
chain_notify.start();
|
||||||
|
}
|
||||||
|
|
||||||
|
// set up dependencies for rpc servers
|
||||||
|
let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies {
|
||||||
|
signer_port: cmd.signer_port,
|
||||||
|
signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()),
|
||||||
|
client: client.clone(),
|
||||||
|
sync: sync_provider.clone(),
|
||||||
|
net: manage_network.clone(),
|
||||||
|
secret_store: account_provider.clone(),
|
||||||
|
miner: miner.clone(),
|
||||||
|
external_miner: external_miner.clone(),
|
||||||
|
logger: logger.clone(),
|
||||||
|
settings: Arc::new(cmd.net_settings.clone()),
|
||||||
|
allow_pending_receipt_query: !cmd.geth_compatibility,
|
||||||
|
net_service: manage_network.clone()
|
||||||
|
});
|
||||||
|
|
||||||
|
let dependencies = rpc::Dependencies {
|
||||||
|
panic_handler: panic_handler.clone(),
|
||||||
|
apis: deps_for_rpc_apis.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// start rpc servers
|
||||||
|
let http_server = try!(rpc::new_http(cmd.http_conf, &dependencies));
|
||||||
|
let ipc_server = try!(rpc::new_ipc(cmd.ipc_conf, &dependencies));
|
||||||
|
|
||||||
|
let dapps_deps = dapps::Dependencies {
|
||||||
|
panic_handler: panic_handler.clone(),
|
||||||
|
apis: deps_for_rpc_apis.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// start dapps server
|
||||||
|
let dapps_server = try!(dapps::new(cmd.dapps_conf.clone(), dapps_deps));
|
||||||
|
|
||||||
|
let signer_deps = signer::Dependencies {
|
||||||
|
panic_handler: panic_handler.clone(),
|
||||||
|
apis: deps_for_rpc_apis.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// start signer server
|
||||||
|
let signer_server = try!(signer::start(cmd.signer_conf, signer_deps));
|
||||||
|
|
||||||
|
let io_handler = Arc::new(ClientIoHandler {
|
||||||
|
client: service.client(),
|
||||||
|
info: Arc::new(Informant::new(client.clone(), Some(sync_provider.clone()), Some(manage_network.clone()), cmd.logger_config.color)),
|
||||||
|
sync: sync_provider.clone(),
|
||||||
|
net: manage_network.clone(),
|
||||||
|
accounts: account_provider.clone(),
|
||||||
|
});
|
||||||
|
service.register_io_handler(io_handler).expect("Error registering IO handler");
|
||||||
|
|
||||||
|
// start ui
|
||||||
|
if cmd.ui {
|
||||||
|
if !cmd.dapps_conf.enabled {
|
||||||
|
return Err("Cannot use UI command with Dapps turned off.".into())
|
||||||
|
}
|
||||||
|
url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle exit
|
||||||
|
wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(windows))]
|
||||||
|
fn daemonize(pid_file: String) -> Result<(), String> {
|
||||||
|
extern crate daemonize;
|
||||||
|
|
||||||
|
daemonize::Daemonize::new()
|
||||||
|
.pid_file(pid_file)
|
||||||
|
.chown_pid_file(true)
|
||||||
|
.start()
|
||||||
|
.map(|_| ())
|
||||||
|
.map_err(|e| format!("Couldn't daemonize; {}", e))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(windows)]
|
||||||
|
fn daemonize(_pid_file: String) -> Result<(), String> {
|
||||||
|
Err("daemon is no supported on windows".into())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig) -> Result<AccountProvider, String> {
|
||||||
|
use ethcore::ethstore::{import_accounts, EthStore};
|
||||||
|
use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory};
|
||||||
|
|
||||||
|
let passwords = try!(passwords_from_files(cfg.password_files));
|
||||||
|
|
||||||
|
if cfg.import_keys {
|
||||||
|
let t = if cfg.testnet {
|
||||||
|
DirectoryType::Testnet
|
||||||
|
} else {
|
||||||
|
DirectoryType::Main
|
||||||
|
};
|
||||||
|
|
||||||
|
let from = GethDirectory::open(t);
|
||||||
|
let to = DiskDirectory::create(dirs.keys.clone()).unwrap();
|
||||||
|
// ignore error, cause geth may not exist
|
||||||
|
let _ = import_accounts(&from, &to);
|
||||||
|
}
|
||||||
|
|
||||||
|
let dir = Box::new(DiskDirectory::create(dirs.keys.clone()).unwrap());
|
||||||
|
let account_service = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, cfg.iterations).unwrap()));
|
||||||
|
|
||||||
|
for a in cfg.unlocked_accounts {
|
||||||
|
if passwords.iter().find(|p| account_service.unlock_account_permanently(a, (*p).clone()).is_ok()).is_none() {
|
||||||
|
return Err(format!("No password given to unlock account {}. Pass the password using `--password`.", a));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(account_service)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wait_for_exit(
|
||||||
|
panic_handler: Arc<PanicHandler>,
|
||||||
|
_http_server: Option<HttpServer>,
|
||||||
|
_ipc_server: Option<IpcServer>,
|
||||||
|
_dapps_server: Option<WebappServer>,
|
||||||
|
_signer_server: Option<SignerServer>
|
||||||
|
) {
|
||||||
|
let exit = Arc::new(Condvar::new());
|
||||||
|
|
||||||
|
// Handle possible exits
|
||||||
|
let e = exit.clone();
|
||||||
|
CtrlC::set_handler(move || { e.notify_all(); });
|
||||||
|
|
||||||
|
// Handle panics
|
||||||
|
let e = exit.clone();
|
||||||
|
panic_handler.on_panic(move |_reason| { e.notify_all(); });
|
||||||
|
|
||||||
|
// Wait for signal
|
||||||
|
let mutex = Mutex::new(());
|
||||||
|
let _ = exit.wait(mutex.lock().unwrap());
|
||||||
|
info!("Finishing work, please wait...");
|
||||||
|
}
|
@ -22,28 +22,38 @@ use util::panics::{ForwardPanic, PanicHandler};
|
|||||||
use util::path::restrict_permissions_owner;
|
use util::path::restrict_permissions_owner;
|
||||||
use rpc_apis;
|
use rpc_apis;
|
||||||
use ethcore_signer as signer;
|
use ethcore_signer as signer;
|
||||||
use die::*;
|
use helpers::replace_home;
|
||||||
|
|
||||||
pub use ethcore_signer::Server as SignerServer;
|
pub use ethcore_signer::Server as SignerServer;
|
||||||
|
|
||||||
const CODES_FILENAME: &'static str = "authcodes";
|
const CODES_FILENAME: &'static str = "authcodes";
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
pub struct Configuration {
|
pub struct Configuration {
|
||||||
pub enabled: bool,
|
pub enabled: bool,
|
||||||
pub port: u16,
|
pub port: u16,
|
||||||
pub signer_path: String,
|
pub signer_path: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Default for Configuration {
|
||||||
|
fn default() -> Self {
|
||||||
|
Configuration {
|
||||||
|
enabled: true,
|
||||||
|
port: 8180,
|
||||||
|
signer_path: replace_home("$HOME/.parity/signer"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub struct Dependencies {
|
pub struct Dependencies {
|
||||||
pub panic_handler: Arc<PanicHandler>,
|
pub panic_handler: Arc<PanicHandler>,
|
||||||
pub apis: Arc<rpc_apis::Dependencies>,
|
pub apis: Arc<rpc_apis::Dependencies>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn start(conf: Configuration, deps: Dependencies) -> Option<SignerServer> {
|
pub fn start(conf: Configuration, deps: Dependencies) -> Result<Option<SignerServer>, String> {
|
||||||
if !conf.enabled {
|
if !conf.enabled {
|
||||||
None
|
Ok(None)
|
||||||
} else {
|
} else {
|
||||||
Some(do_start(conf, deps))
|
Ok(Some(try!(do_start(conf, deps))))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -54,7 +64,13 @@ fn codes_path(path: String) -> PathBuf {
|
|||||||
p
|
p
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn new_token(path: String) -> io::Result<String> {
|
pub fn new_token(path: String) -> Result<String, String> {
|
||||||
|
generate_new_token(path)
|
||||||
|
.map(|code| format!("This key code will authorise your System Signer UI: {}", Colour::White.bold().paint(code)))
|
||||||
|
.map_err(|err| format!("Error generating token: {:?}", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn generate_new_token(path: String) -> io::Result<String> {
|
||||||
let path = codes_path(path);
|
let path = codes_path(path);
|
||||||
let mut codes = try!(signer::AuthCodes::from_file(&path));
|
let mut codes = try!(signer::AuthCodes::from_file(&path));
|
||||||
let code = try!(codes.generate_new());
|
let code = try!(codes.generate_new());
|
||||||
@ -63,10 +79,10 @@ pub fn new_token(path: String) -> io::Result<String> {
|
|||||||
Ok(code)
|
Ok(code)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer {
|
fn do_start(conf: Configuration, deps: Dependencies) -> Result<SignerServer, String> {
|
||||||
let addr = format!("127.0.0.1:{}", conf.port).parse().unwrap_or_else(|_| {
|
let addr = try!(format!("127.0.0.1:{}", conf.port)
|
||||||
die!("Invalid port specified: {}", conf.port)
|
.parse()
|
||||||
});
|
.map_err(|_| format!("Invalid port specified: {}", conf.port)));
|
||||||
|
|
||||||
let start_result = {
|
let start_result = {
|
||||||
let server = signer::ServerBuilder::new(
|
let server = signer::ServerBuilder::new(
|
||||||
@ -78,11 +94,11 @@ fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer {
|
|||||||
};
|
};
|
||||||
|
|
||||||
match start_result {
|
match start_result {
|
||||||
Err(signer::ServerError::IoError(err)) => die_with_io_error("Trusted Signer", err),
|
Err(signer::ServerError::IoError(err)) => Err(format!("Trusted Signer Error: {}", err)),
|
||||||
Err(e) => die!("Trusted Signer: {:?}", e),
|
Err(e) => Err(format!("Trusted Signer Error: {:?}", e)),
|
||||||
Ok(server) => {
|
Ok(server) => {
|
||||||
deps.panic_handler.forward_from(&server);
|
deps.panic_handler.forward_from(&server);
|
||||||
server
|
Ok(server)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -38,8 +38,7 @@ use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration};
|
|||||||
use std::thread;
|
use std::thread;
|
||||||
use nanoipc::IpcInterface;
|
use nanoipc::IpcInterface;
|
||||||
|
|
||||||
use ethcore_logger::Settings as LogSettings;
|
use ethcore_logger::{Config as LogConfig, setup_log};
|
||||||
use ethcore_logger::setup_log;
|
|
||||||
|
|
||||||
const USAGE: &'static str = "
|
const USAGE: &'static str = "
|
||||||
Ethcore sync service
|
Ethcore sync service
|
||||||
@ -63,18 +62,12 @@ struct Args {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Args {
|
impl Args {
|
||||||
pub fn log_settings(&self) -> LogSettings {
|
pub fn log_settings(&self) -> LogConfig {
|
||||||
let mut settings = LogSettings::new();
|
LogConfig {
|
||||||
if self.flag_no_color || cfg!(windows) {
|
color: self.flag_no_color || cfg!(windows),
|
||||||
settings = settings.no_color();
|
mode: self.flag_logging.clone(),
|
||||||
|
file: self.flag_log_file.clone(),
|
||||||
}
|
}
|
||||||
if let Some(ref init) = self.flag_logging {
|
|
||||||
settings = settings.init(init.to_owned())
|
|
||||||
}
|
|
||||||
if let Some(ref file) = self.flag_log_file {
|
|
||||||
settings = settings.file(file.to_owned())
|
|
||||||
}
|
|
||||||
settings
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -97,7 +90,7 @@ fn main() {
|
|||||||
.and_then(|d| d.decode())
|
.and_then(|d| d.decode())
|
||||||
.unwrap_or_else(|e| e.exit());
|
.unwrap_or_else(|e| e.exit());
|
||||||
|
|
||||||
setup_log(&args.log_settings());
|
setup_log(&args.log_settings()).expect("Log initialization failure");
|
||||||
|
|
||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
io::stdin().read_to_end(&mut buffer).expect("Failed to read initialisation payload");
|
io::stdin().read_to_end(&mut buffer).expect("Failed to read initialisation payload");
|
||||||
|
@ -565,7 +565,7 @@ macro_rules! construct_uint {
|
|||||||
impl Uint for $name {
|
impl Uint for $name {
|
||||||
|
|
||||||
fn from_dec_str(value: &str) -> Result<Self, FromDecStrErr> {
|
fn from_dec_str(value: &str) -> Result<Self, FromDecStrErr> {
|
||||||
if value.bytes().any(|b| b < 48 && b > 57) {
|
if !value.bytes().all(|b| b >= 48 && b <= 57) {
|
||||||
return Err(FromDecStrErr::InvalidCharacter)
|
return Err(FromDecStrErr::InvalidCharacter)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1788,6 +1788,7 @@ mod tests {
|
|||||||
assert_eq!(U256::from_dec_str("10").unwrap(), U256::from(10u64));
|
assert_eq!(U256::from_dec_str("10").unwrap(), U256::from(10u64));
|
||||||
assert_eq!(U256::from_dec_str("1024").unwrap(), U256::from(1024u64));
|
assert_eq!(U256::from_dec_str("1024").unwrap(), U256::from(1024u64));
|
||||||
assert_eq!(U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), Err(FromDecStrErr::InvalidLength));
|
assert_eq!(U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), Err(FromDecStrErr::InvalidLength));
|
||||||
|
assert_eq!(U256::from_dec_str("0x11"), Err(FromDecStrErr::InvalidCharacter));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
@ -13,6 +13,7 @@
|
|||||||
//
|
//
|
||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.extern crate libc;
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.extern crate libc;
|
||||||
|
|
||||||
extern crate libc;
|
extern crate libc;
|
||||||
pub mod raise_fd_limit;
|
mod raise_fd_limit;
|
||||||
pub use raise_fd_limit::raise_fd_limit;
|
pub use raise_fd_limit::raise_fd_limit;
|
||||||
|
@ -15,70 +15,74 @@
|
|||||||
///
|
///
|
||||||
#[cfg(any(target_os = "macos", target_os = "ios"))]
|
#[cfg(any(target_os = "macos", target_os = "ios"))]
|
||||||
#[allow(non_camel_case_types)]
|
#[allow(non_camel_case_types)]
|
||||||
pub unsafe fn raise_fd_limit() {
|
pub fn raise_fd_limit() {
|
||||||
use libc;
|
use libc;
|
||||||
use std::cmp;
|
use std::cmp;
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::mem::size_of_val;
|
use std::mem::size_of_val;
|
||||||
use std::ptr::null_mut;
|
use std::ptr::null_mut;
|
||||||
|
|
||||||
static CTL_KERN: libc::c_int = 1;
|
unsafe {
|
||||||
static KERN_MAXFILESPERPROC: libc::c_int = 29;
|
static CTL_KERN: libc::c_int = 1;
|
||||||
|
static KERN_MAXFILESPERPROC: libc::c_int = 29;
|
||||||
|
|
||||||
// The strategy here is to fetch the current resource limits, read the
|
// The strategy here is to fetch the current resource limits, read the
|
||||||
// kern.maxfilesperproc sysctl value, and bump the soft resource limit for
|
// kern.maxfilesperproc sysctl value, and bump the soft resource limit for
|
||||||
// maxfiles up to the sysctl value.
|
// maxfiles up to the sysctl value.
|
||||||
|
|
||||||
// Fetch the kern.maxfilesperproc value
|
// Fetch the kern.maxfilesperproc value
|
||||||
let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC];
|
let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC];
|
||||||
let mut maxfiles: libc::c_int = 0;
|
let mut maxfiles: libc::c_int = 0;
|
||||||
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
|
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
|
||||||
if libc::sysctl(&mut mib[0], 2, &mut maxfiles as *mut _ as *mut _, &mut size,
|
if libc::sysctl(&mut mib[0], 2, &mut maxfiles as *mut _ as *mut _, &mut size,
|
||||||
null_mut(), 0) != 0 {
|
null_mut(), 0) != 0 {
|
||||||
let err = io::Error::last_os_error();
|
let err = io::Error::last_os_error();
|
||||||
panic!("raise_fd_limit: error calling sysctl: {}", err);
|
panic!("raise_fd_limit: error calling sysctl: {}", err);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fetch the current resource limits
|
// Fetch the current resource limits
|
||||||
let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0};
|
let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0};
|
||||||
if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 {
|
if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 {
|
||||||
let err = io::Error::last_os_error();
|
let err = io::Error::last_os_error();
|
||||||
panic!("raise_fd_limit: error calling getrlimit: {}", err);
|
panic!("raise_fd_limit: error calling getrlimit: {}", err);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard
|
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard
|
||||||
// limit
|
// limit
|
||||||
rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max);
|
rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max);
|
||||||
|
|
||||||
// Set our newly-increased resource limit
|
// Set our newly-increased resource limit
|
||||||
if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 {
|
if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 {
|
||||||
let err = io::Error::last_os_error();
|
let err = io::Error::last_os_error();
|
||||||
panic!("raise_fd_limit: error calling setrlimit: {}", err);
|
panic!("raise_fd_limit: error calling setrlimit: {}", err);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(any(target_os = "linux"))]
|
#[cfg(any(target_os = "linux"))]
|
||||||
#[allow(non_camel_case_types)]
|
#[allow(non_camel_case_types)]
|
||||||
pub unsafe fn raise_fd_limit() {
|
pub fn raise_fd_limit() {
|
||||||
use libc;
|
use libc;
|
||||||
use std::io;
|
use std::io;
|
||||||
|
|
||||||
// Fetch the current resource limits
|
unsafe {
|
||||||
let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0};
|
// Fetch the current resource limits
|
||||||
if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 {
|
let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0};
|
||||||
let err = io::Error::last_os_error();
|
if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 {
|
||||||
panic!("raise_fd_limit: error calling getrlimit: {}", err);
|
let err = io::Error::last_os_error();
|
||||||
}
|
panic!("raise_fd_limit: error calling getrlimit: {}", err);
|
||||||
|
}
|
||||||
|
|
||||||
// Set soft limit to hard imit
|
// Set soft limit to hard imit
|
||||||
rlim.rlim_cur = rlim.rlim_max;
|
rlim.rlim_cur = rlim.rlim_max;
|
||||||
|
|
||||||
// Set our newly-increased resource limit
|
// Set our newly-increased resource limit
|
||||||
if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 {
|
if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 {
|
||||||
let err = io::Error::last_os_error();
|
let err = io::Error::last_os_error();
|
||||||
panic!("raise_fd_limit: error calling setrlimit: {}", err);
|
panic!("raise_fd_limit: error calling setrlimit: {}", err);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "linux")))]
|
#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "linux")))]
|
||||||
pub unsafe fn raise_fd_limit() {}
|
pub fn raise_fd_limit() {}
|
||||||
|
@ -67,7 +67,7 @@ pub trait FixedHash: Sized + BytesConvertable + Populatable + FromStr + Default
|
|||||||
|
|
||||||
/// Return `s` without the `0x` at the beginning of it, if any.
|
/// Return `s` without the `0x` at the beginning of it, if any.
|
||||||
pub fn clean_0x(s: &str) -> &str {
|
pub fn clean_0x(s: &str) -> &str {
|
||||||
if s.len() >= 2 && &s[0..2] == "0x" {
|
if s.starts_with("0x") {
|
||||||
&s[2..]
|
&s[2..]
|
||||||
} else {
|
} else {
|
||||||
s
|
s
|
||||||
@ -429,13 +429,13 @@ macro_rules! impl_hash {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> From<&'a str> for $from {
|
impl From<&'static str> for $from {
|
||||||
fn from(s: &'a str) -> $from {
|
fn from(s: &'static str) -> $from {
|
||||||
use std::str::FromStr;
|
let s = clean_0x(s);
|
||||||
if s.len() % 2 == 1 {
|
if s.len() % 2 == 1 {
|
||||||
$from::from_str(&("0".to_owned() + &(clean_0x(s).to_owned()))[..]).unwrap_or_else(|_| $from::new())
|
$from::from_str(&("0".to_owned() + s)).unwrap()
|
||||||
} else {
|
} else {
|
||||||
$from::from_str(clean_0x(s)).unwrap_or_else(|_| $from::new())
|
$from::from_str(s).unwrap()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -613,8 +613,6 @@ mod tests {
|
|||||||
assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef"));
|
assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef"));
|
||||||
assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef"));
|
assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef"));
|
||||||
assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef"));
|
assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef"));
|
||||||
// too short.
|
|
||||||
assert_eq!(H64::from(0), H64::from("0x34567890abcdef"));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
@ -30,7 +30,7 @@ mod refcounteddb;
|
|||||||
pub use self::traits::JournalDB;
|
pub use self::traits::JournalDB;
|
||||||
|
|
||||||
/// A journal database algorithm.
|
/// A journal database algorithm.
|
||||||
#[derive(Debug, Clone, Copy)]
|
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||||
pub enum Algorithm {
|
pub enum Algorithm {
|
||||||
/// Keep all keys forever.
|
/// Keep all keys forever.
|
||||||
Archive,
|
Archive,
|
||||||
@ -60,14 +60,48 @@ impl Default for Algorithm {
|
|||||||
fn default() -> Algorithm { Algorithm::OverlayRecent }
|
fn default() -> Algorithm { Algorithm::OverlayRecent }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl FromStr for Algorithm {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"archive" => Ok(Algorithm::Archive),
|
||||||
|
"light" => Ok(Algorithm::EarlyMerge),
|
||||||
|
"fast" => Ok(Algorithm::OverlayRecent),
|
||||||
|
"basic" => Ok(Algorithm::RefCounted),
|
||||||
|
e => Err(format!("Invalid algorithm: {}", e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Algorithm {
|
||||||
|
/// Returns static str describing journal database algorithm.
|
||||||
|
pub fn as_str(&self) -> &'static str {
|
||||||
|
match *self {
|
||||||
|
Algorithm::Archive => "archive",
|
||||||
|
Algorithm::EarlyMerge => "light",
|
||||||
|
Algorithm::OverlayRecent => "fast",
|
||||||
|
Algorithm::RefCounted => "basic",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if pruning strategy is stable
|
||||||
|
pub fn is_stable(&self) -> bool {
|
||||||
|
match *self {
|
||||||
|
Algorithm::Archive | Algorithm::OverlayRecent => true,
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns all algorithm types.
|
||||||
|
pub fn all_types() -> Vec<Algorithm> {
|
||||||
|
vec![Algorithm::Archive, Algorithm::EarlyMerge, Algorithm::OverlayRecent, Algorithm::RefCounted]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl fmt::Display for Algorithm {
|
impl fmt::Display for Algorithm {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
write!(f, "{}", match self {
|
write!(f, "{}", self.as_str())
|
||||||
&Algorithm::Archive => "archive",
|
|
||||||
&Algorithm::EarlyMerge => "earlymerge",
|
|
||||||
&Algorithm::OverlayRecent => "overlayrecent",
|
|
||||||
&Algorithm::RefCounted => "refcounted",
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -85,3 +119,60 @@ pub fn new(path: &str, algorithm: Algorithm, config: DatabaseConfig) -> Box<Jour
|
|||||||
const DB_PREFIX_LEN : usize = 12;
|
const DB_PREFIX_LEN : usize = 12;
|
||||||
const LATEST_ERA_KEY : [u8; DB_PREFIX_LEN] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
|
const LATEST_ERA_KEY : [u8; DB_PREFIX_LEN] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
|
||||||
const VERSION_KEY : [u8; DB_PREFIX_LEN] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
|
const VERSION_KEY : [u8; DB_PREFIX_LEN] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::Algorithm;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_journal_algorithm_parsing() {
|
||||||
|
assert_eq!(Algorithm::Archive, "archive".parse().unwrap());
|
||||||
|
assert_eq!(Algorithm::EarlyMerge, "light".parse().unwrap());
|
||||||
|
assert_eq!(Algorithm::OverlayRecent, "fast".parse().unwrap());
|
||||||
|
assert_eq!(Algorithm::RefCounted, "basic".parse().unwrap());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_journal_algorithm_printing() {
|
||||||
|
assert_eq!(Algorithm::Archive.to_string(), "archive".to_owned());
|
||||||
|
assert_eq!(Algorithm::EarlyMerge.to_string(), "light".to_owned());
|
||||||
|
assert_eq!(Algorithm::OverlayRecent.to_string(), "fast".to_owned());
|
||||||
|
assert_eq!(Algorithm::RefCounted.to_string(), "basic".to_owned());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_journal_algorithm_is_stable() {
|
||||||
|
assert!(Algorithm::Archive.is_stable());
|
||||||
|
assert!(Algorithm::OverlayRecent.is_stable());
|
||||||
|
assert!(!Algorithm::EarlyMerge.is_stable());
|
||||||
|
assert!(!Algorithm::RefCounted.is_stable());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_journal_algorithm_default() {
|
||||||
|
assert_eq!(Algorithm::default(), Algorithm::OverlayRecent);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_journal_algorithm_all_types() {
|
||||||
|
// compiling should fail if some cases are not covered
|
||||||
|
let mut archive = 0;
|
||||||
|
let mut earlymerge = 0;
|
||||||
|
let mut overlayrecent = 0;
|
||||||
|
let mut refcounted = 0;
|
||||||
|
|
||||||
|
for a in &Algorithm::all_types() {
|
||||||
|
match *a {
|
||||||
|
Algorithm::Archive => archive += 1,
|
||||||
|
Algorithm::EarlyMerge => earlymerge += 1,
|
||||||
|
Algorithm::OverlayRecent => overlayrecent += 1,
|
||||||
|
Algorithm::RefCounted => refcounted += 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(archive, 1);
|
||||||
|
assert_eq!(earlymerge, 1);
|
||||||
|
assert_eq!(overlayrecent, 1);
|
||||||
|
assert_eq!(refcounted, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -51,7 +51,7 @@ const MAX_HANDSHAKES: usize = 80;
|
|||||||
const MAX_HANDSHAKES_PER_ROUND: usize = 32;
|
const MAX_HANDSHAKES_PER_ROUND: usize = 32;
|
||||||
const MAINTENANCE_TIMEOUT: u64 = 1000;
|
const MAINTENANCE_TIMEOUT: u64 = 1000;
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
/// Network service configuration
|
/// Network service configuration
|
||||||
pub struct NetworkConfiguration {
|
pub struct NetworkConfiguration {
|
||||||
/// Directory path to store network configuration. None means nothing will be saved
|
/// Directory path to store network configuration. None means nothing will be saved
|
||||||
|
@ -16,17 +16,12 @@
|
|||||||
|
|
||||||
// Based on original work by David Levy https://raw.githubusercontent.com/dlevy47/rust-interfaces
|
// Based on original work by David Levy https://raw.githubusercontent.com/dlevy47/rust-interfaces
|
||||||
|
|
||||||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
|
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
|
||||||
use std::io;
|
use std::io;
|
||||||
use igd::{PortMappingProtocol, search_gateway_from_timeout};
|
use igd::{PortMappingProtocol, search_gateway_from_timeout};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use network::node_table::{NodeEndpoint};
|
use network::node_table::{NodeEndpoint};
|
||||||
|
|
||||||
pub enum IpAddr{
|
|
||||||
V4(Ipv4Addr),
|
|
||||||
V6(Ipv6Addr),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Socket address extension for rustc beta. To be replaces with now unstable API
|
/// Socket address extension for rustc beta. To be replaces with now unstable API
|
||||||
pub trait SocketAddrExt {
|
pub trait SocketAddrExt {
|
||||||
/// Returns true for the special 'unspecified' address 0.0.0.0.
|
/// Returns true for the special 'unspecified' address 0.0.0.0.
|
||||||
@ -66,8 +61,7 @@ mod getinterfaces {
|
|||||||
use std::{mem, io, ptr};
|
use std::{mem, io, ptr};
|
||||||
use libc::{AF_INET, AF_INET6};
|
use libc::{AF_INET, AF_INET6};
|
||||||
use libc::{getifaddrs, freeifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6};
|
use libc::{getifaddrs, freeifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6};
|
||||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
use std::net::{Ipv4Addr, Ipv6Addr, IpAddr};
|
||||||
use super::IpAddr;
|
|
||||||
|
|
||||||
fn convert_sockaddr (sa: *mut sockaddr) -> Option<IpAddr> {
|
fn convert_sockaddr (sa: *mut sockaddr) -> Option<IpAddr> {
|
||||||
if sa == ptr::null_mut() { return None; }
|
if sa == ptr::null_mut() { return None; }
|
||||||
|
@ -16,7 +16,7 @@
|
|||||||
//! Structure to hold network settings configured from CLI
|
//! Structure to hold network settings configured from CLI
|
||||||
|
|
||||||
/// Networking & RPC settings
|
/// Networking & RPC settings
|
||||||
#[derive(Debug, PartialEq)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
pub struct NetworkSettings {
|
pub struct NetworkSettings {
|
||||||
/// Node name
|
/// Node name
|
||||||
pub name: String,
|
pub name: String,
|
||||||
@ -34,3 +34,16 @@ pub struct NetworkSettings {
|
|||||||
pub rpc_port: u16,
|
pub rpc_port: u16,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Default for NetworkSettings {
|
||||||
|
fn default() -> Self {
|
||||||
|
NetworkSettings {
|
||||||
|
name: "".into(),
|
||||||
|
chain: "homestead".into(),
|
||||||
|
max_peers: 25,
|
||||||
|
network_port: 30303,
|
||||||
|
rpc_enabled: true,
|
||||||
|
rpc_interface: "local".into(),
|
||||||
|
rpc_port: 8545
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -63,7 +63,7 @@ impl fmt::Display for TrieError {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Trie types
|
/// Trie types
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
pub enum TrieSpec {
|
pub enum TrieSpec {
|
||||||
/// Generic trie.
|
/// Generic trie.
|
||||||
Generic,
|
Generic,
|
||||||
|
Loading…
Reference in New Issue
Block a user