diff --git a/appveyor.yml b/appveyor.yml index 26f82122f..3ffaa961e 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -31,10 +31,10 @@ install: build: off test_script: - - cargo test --verbose --release --no-default-features + - cargo test --verbose --release after_test: - - cargo build --verbose --release --no-default-features + - cargo build --verbose --release - ps: if($env:cert) { Start-FileDownload $env:cert -FileName $env:keyfile } - ps: if($env:cert) { signtool sign /f $env:keyfile /p $env:certpass target\release\parity.exe } - makensis.exe nsis\installer.nsi diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 623fab4a6..02535fa11 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -36,7 +36,7 @@ const MIN_MEM_LIMIT: usize = 16384; const MIN_QUEUE_LIMIT: usize = 512; /// Block queue configuration -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct BlockQueueConfig { /// Maximum number of blocks to keep in unverified queue. /// When the limit is reached, is_full returns true. diff --git a/ethcore/src/blockchain/config.rs b/ethcore/src/blockchain/config.rs index e063d4269..1a0ab9d42 100644 --- a/ethcore/src/blockchain/config.rs +++ b/ethcore/src/blockchain/config.rs @@ -17,7 +17,7 @@ //! Blockchain configuration. /// Blockchain configuration. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct Config { /// Preferred cache size in bytes. pub pref_cache_size: usize, diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 0bb387e74..18a54de70 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -17,7 +17,7 @@ use std::collections::{HashSet, HashMap, VecDeque}; use std::ops::Deref; use std::sync::{Arc, Weak}; -use std::path::{Path, PathBuf}; +use std::path::{Path}; use std::fmt; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; use std::time::{Instant}; @@ -141,26 +141,10 @@ pub struct Client { } const HISTORY: u64 = 1200; -// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. -// Altering it will force a blanket DB update for *all* JournalDB-derived -// databases. -// Instead, add/upgrade the version string of the individual JournalDB-derived database -// of which you actually want force an upgrade. -const CLIENT_DB_VER_STR: &'static str = "5.3"; - -/// Get the path for the databases given the root path and information on the databases. -pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { - let mut dir = path.to_path_buf(); - dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default())); - //TODO: sec/fat: pruned/full versioning - // version here is a bit useless now, since it's controlled only be the pruning algo. - dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning)); - dir -} /// Append a path element to the given path and return the string. -pub fn append_path(path: &Path, item: &str) -> String { - let mut p = path.to_path_buf(); +pub fn append_path

<P>(path: P, item: &str) -> String where P: AsRef<Path> { + let mut p = path.as_ref().to_path_buf(); p.push(item); p.to_str().unwrap().to_owned() } @@ -174,7 +158,7 @@ impl Client { miner: Arc<Miner>, message_channel: IoChannel, ) -> Result<Arc<Client>, ClientError> { - let path = get_db_path(path, config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref()); + let path = path.to_path_buf(); let gb = spec.genesis_block(); let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 1010ce656..4cfd04e2d 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::str::FromStr; pub use std::time::Duration; pub use block_queue::BlockQueueConfig; pub use blockchain::Config as BlockChainConfig; @@ -33,7 +34,21 @@ pub enum DatabaseCompactionProfile { } impl Default for DatabaseCompactionProfile { - fn default() -> Self { DatabaseCompactionProfile::Default } + fn default() -> Self { + DatabaseCompactionProfile::Default + } +} + +impl FromStr for DatabaseCompactionProfile { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "ssd" | "default" => Ok(DatabaseCompactionProfile::Default), + "hdd" => Ok(DatabaseCompactionProfile::HDD), + _ => Err(format!("Invalid compaction profile given. Expected hdd/ssd (default).")), + } + } } /// Operating mode for the client. @@ -50,11 +65,13 @@ pub enum Mode { } impl Default for Mode { - fn default() -> Self { Mode::Active } + fn default() -> Self { + Mode::Active + } } /// Client configuration. Includes configs for all sub-systems. -#[derive(Debug, Default)] +#[derive(Debug, PartialEq, Default)] pub struct ClientConfig { /// Block queue configuration. pub queue: BlockQueueConfig, @@ -79,3 +96,25 @@ pub struct ClientConfig { /// Type of block verifier used by client. pub verifier_type: VerifierType, } + +#[cfg(test)] +mod test { + use super::{DatabaseCompactionProfile, Mode}; + + #[test] + fn test_default_compaction_profile() { + assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Default); + } + + #[test] + fn test_parsing_compaction_profile() { + assert_eq!(DatabaseCompactionProfile::Default, "ssd".parse().unwrap()); + assert_eq!(DatabaseCompactionProfile::Default, "default".parse().unwrap()); + assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap()); + } + + #[test] + fn test_mode_default() { + assert_eq!(Mode::default(), Mode::Active); + } +} diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs index 082b9d050..50c384a99 100644 --- a/ethcore/src/evm/factory.rs +++ b/ethcore/src/evm/factory.rs @@ -21,7 +21,7 @@ use std::fmt; use evm::Evm; use util::{U256, Uint}; -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] /// Type of EVM to use. pub enum VMType { /// JIT EVM diff --git a/ethcore/src/json_tests/trie.rs b/ethcore/src/json_tests/trie.rs index 2d23ff7d2..e62fd01b3 100644 --- a/ethcore/src/json_tests/trie.rs +++ b/ethcore/src/json_tests/trie.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see .
use ethjson; -use util::{H256, MemoryDB, TrieMut, TrieSpec, TrieFactory}; +use util::{H256, MemoryDB, TrieSpec, TrieFactory}; fn test_trie(json: &[u8], trie: TrieSpec) -> Vec { let tests = ethjson::trie::Test::load(json).unwrap(); diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 97ba6c082..79c8a95bf 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -36,7 +36,7 @@ use client::TransactionImportResult; use miner::price_info::PriceInfo; /// Different possible definitions for pending transaction set. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum PendingSet { /// Always just the transactions in the queue. These have had only cheap checks. AlwaysQueue, @@ -48,7 +48,7 @@ pub enum PendingSet { } /// Configures the behaviour of the miner. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct MinerOptions { /// URLs to notify when there is new work. pub new_work_notify: Vec, @@ -77,12 +77,12 @@ impl Default for MinerOptions { MinerOptions { new_work_notify: vec![], force_sealing: false, - reseal_on_external_tx: true, + reseal_on_external_tx: false, reseal_on_own_tx: true, tx_gas_limit: !U256::zero(), tx_queue_size: 1024, pending_set: PendingSet::AlwaysQueue, - reseal_min_period: Duration::from_secs(0), + reseal_min_period: Duration::from_secs(2), work_queue_size: 20, enable_resubmission: true, } @@ -90,6 +90,7 @@ impl Default for MinerOptions { } /// Options for the dynamic gas price recalibrator. +#[derive(Debug, PartialEq)] pub struct GasPriceCalibratorOptions { /// Base transaction price to match against. pub usd_per_tx: f32, @@ -98,9 +99,9 @@ pub struct GasPriceCalibratorOptions { } /// The gas price validator variant for a `GasPricer`. +#[derive(Debug, PartialEq)] pub struct GasPriceCalibrator { options: GasPriceCalibratorOptions, - next_calibration: Instant, } @@ -128,6 +129,7 @@ impl GasPriceCalibrator { } /// Struct to look after updating the acceptable gas price of a miner. +#[derive(Debug, PartialEq)] pub enum GasPricer { /// A fixed gas price in terms of Wei - always the argument given. Fixed(U256), diff --git a/ethcore/src/trace/config.rs b/ethcore/src/trace/config.rs index 2876baa28..1c4646817 100644 --- a/ethcore/src/trace/config.rs +++ b/ethcore/src/trace/config.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . //! Traces config. +use std::str::FromStr; use bloomchain::Config as BloomConfig; use trace::Error; @@ -29,6 +30,25 @@ pub enum Switch { Auto, } +impl Default for Switch { + fn default() -> Self { + Switch::Auto + } +} + +impl FromStr for Switch { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "on" => Ok(Switch::On), + "off" => Ok(Switch::Off), + "auto" => Ok(Switch::Auto), + other => Err(format!("Invalid switch value: {}", other)) + } + } +} + impl Switch { /// Tries to turn old switch to new value. pub fn turn_to(&self, to: Switch) -> Result { @@ -41,7 +61,7 @@ impl Switch { } /// Traces config. -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] pub struct Config { /// Indicates if tracing should be enabled or not. /// If it's None, it will be automatically configured. 
@@ -55,7 +75,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: Switch::Auto, + enabled: Switch::default(), blooms: BloomConfig { levels: 3, elements_per_index: 16, @@ -64,3 +84,20 @@ impl Default for Config { } } } + +#[cfg(test)] +mod tests { + use super::Switch; + + #[test] + fn test_switch_parsing() { + assert_eq!(Switch::On, "on".parse().unwrap()); + assert_eq!(Switch::Off, "off".parse().unwrap()); + assert_eq!(Switch::Auto, "auto".parse().unwrap()); + } + + #[test] + fn test_switch_default() { + assert_eq!(Switch::default(), Switch::Auto); + } +} diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 10aee21f4..53c38a6b0 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -25,7 +25,7 @@ pub use self::canon_verifier::CanonVerifier; pub use self::noop_verifier::NoopVerifier; /// Verifier type. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum VerifierType { /// Verifies block normally. Canon, diff --git a/ethstore/src/dir/disk.rs b/ethstore/src/dir/disk.rs index b94fe5214..af6ad917c 100644 --- a/ethstore/src/dir/disk.rs +++ b/ethstore/src/dir/disk.rs @@ -14,16 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::{fs, ffi, io}; +use std::{fs, io}; use std::path::{PathBuf, Path}; use std::collections::HashMap; use time; use ethkey::Address; -use {libc, json, SafeAccount, Error}; +use {json, SafeAccount, Error}; use super::KeyDirectory; #[cfg(not(windows))] fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { + use std::ffi; + use libc; let cstr = ffi::CString::new(file_path.to_str().unwrap()).unwrap(); match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } { 0 => Ok(()), @@ -32,7 +34,7 @@ fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { } #[cfg(windows)] -fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { +fn restrict_permissions_to_owner(_file_path: &Path) -> Result<(), i32> { Ok(()) } diff --git a/logger/src/lib.rs b/logger/src/lib.rs index 521c3a2d7..2a6c0bb35 100644 --- a/logger/src/lib.rs +++ b/logger/src/lib.rs @@ -36,39 +36,25 @@ use regex::Regex; use util::RotatingLogger; use util::log::Colour; -pub struct Settings { +#[derive(Debug, PartialEq)] +pub struct Config { + pub mode: Option, pub color: bool, - pub init: Option, pub file: Option, } -impl Settings { - pub fn new() -> Settings { - Settings { - color: true, - init: None, +impl Default for Config { + fn default() -> Self { + Config { + mode: None, + color: !cfg!(windows), file: None, } } - - pub fn init(mut self, init: String) -> Settings { - self.init = Some(init); - self - } - - pub fn file(mut self, file: String) -> Settings { - self.file = Some(file); - self - } - - pub fn no_color(mut self) -> Settings { - self.color = false; - self - } } /// Sets up the logger -pub fn setup_log(settings: &Settings) -> Arc { +pub fn setup_log(config: &Config) -> Result, String> { use rlog::*; let mut levels = String::new(); @@ -84,16 +70,21 @@ pub fn setup_log(settings: &Settings) -> Arc { builder.parse(lvl); } - if let Some(ref s) = settings.init { + if let Some(ref s) = config.mode { levels.push_str(s); builder.parse(s); } let isatty = stderr_isatty(); - let enable_color = settings.color && isatty; + let enable_color = config.color && isatty; let logs = Arc::new(RotatingLogger::new(levels)); let logger = logs.clone(); - let maybe_file = settings.file.as_ref().map(|f| 
File::create(f).unwrap_or_else(|_| panic!("Cannot write to log file given: {}", f))); + + let maybe_file = match config.file.as_ref() { + Some(f) => Some(try!(File::create(f).map_err(|_| format!("Cannot write to log file given: {}", f)))), + None => None, + }; + let format = move |record: &LogRecord| { let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap(); @@ -123,9 +114,11 @@ pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> { ret }; + builder.format(format); builder.init().unwrap(); - logs + + Ok(logs) } fn kill_color(s: &str) -> String { diff --git a/parity/account.rs b/parity/account.rs new file mode 100644 index 000000000..3c4a5dd74 --- /dev/null +++ b/parity/account.rs @@ -0,0 +1,84 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use ethcore::ethstore::{EthStore, import_accounts}; +use ethcore::ethstore::dir::DiskDirectory; +use ethcore::account_provider::AccountProvider; +use helpers::{password_prompt, password_from_file}; + +#[derive(Debug, PartialEq)] +pub enum AccountCmd { + New(NewAccount), + List(String), + Import(ImportAccounts), +} + +#[derive(Debug, PartialEq)] +pub struct NewAccount { + pub iterations: u32, + pub path: String, + pub password_file: Option<String>, +} + +#[derive(Debug, PartialEq)] +pub struct ImportAccounts { + pub from: Vec<String>, + pub to: String, +} + +pub fn execute(cmd: AccountCmd) -> Result<String, String> { + match cmd { + AccountCmd::New(new_cmd) => new(new_cmd), + AccountCmd::List(path) => list(path), + AccountCmd::Import(import_cmd) => import(import_cmd), + } +} + +fn new(n: NewAccount) -> Result<String, String> { + let password: String = match n.password_file { + Some(file) => try!(password_from_file(file)), + None => try!(password_prompt()), + }; + + let dir = Box::new(DiskDirectory::create(n.path).unwrap()); + let secret_store = Box::new(EthStore::open_with_iterations(dir, n.iterations).unwrap()); + let acc_provider = AccountProvider::new(secret_store); + let new_account = acc_provider.new_account(&password).unwrap(); + Ok(format!("{:?}", new_account)) +} + +fn list(path: String) -> Result<String, String> { + let dir = Box::new(DiskDirectory::create(path).unwrap()); + let secret_store = Box::new(EthStore::open(dir).unwrap()); + let acc_provider = AccountProvider::new(secret_store); + let accounts = acc_provider.accounts(); + let result = accounts.into_iter() + .map(|a| format!("{:?}", a)) + .collect::<Vec<String>>() + .join("\n"); + + Ok(result) +} + +fn import(i: ImportAccounts) -> Result<String, String> { + let to = DiskDirectory::create(i.to).unwrap(); + let mut imported = 0; + for path in &i.from { + let from = DiskDirectory::at(path); + imported += try!(import_accounts(&from, &to).map_err(|_| "Importing accounts failed.")).len(); + } + Ok(format!("{}", imported)) +} diff --git a/parity/blockchain.rs b/parity/blockchain.rs new file mode 100644 index 000000000..5843b9d03 --- /dev/null +++ b/parity/blockchain.rs @@ -0,0 +1,284 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::str::{FromStr, from_utf8}; +use std::{io, fs}; +use std::io::{BufReader, BufRead}; +use std::time::Duration; +use std::thread::sleep; +use std::path::Path; +use std::sync::Arc; +use rustc_serialize::hex::FromHex; +use ethcore_logger::{setup_log, Config as LogConfig}; +use util::panics::{PanicHandler, ForwardPanic}; +use util::{PayloadInfo, ToPretty}; +use ethcore::service::ClientService; +use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID}; +use ethcore::error::ImportError; +use ethcore::miner::Miner; +use cache::CacheConfig; +use informant::Informant; +use params::{SpecType, Pruning}; +use helpers::{to_client_config, execute_upgrades}; +use dir::Directories; +use fdlimit; + +#[derive(Debug, PartialEq)] +pub enum DataFormat { + Hex, + Binary, +} + +impl Default for DataFormat { + fn default() -> Self { + DataFormat::Binary + } +} + +impl FromStr for DataFormat { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "binary" | "bin" => Ok(DataFormat::Binary), + "hex" => Ok(DataFormat::Hex), + x => Err(format!("Invalid format: {}", x)) + } + } +} + +#[derive(Debug, PartialEq)] +pub enum BlockchainCmd { + Import(ImportBlockchain), + Export(ExportBlockchain), +} + +#[derive(Debug, PartialEq)] +pub struct ImportBlockchain { + pub spec: SpecType, + pub logger_config: LogConfig, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub file_path: Option, + pub format: Option, + pub pruning: Pruning, + pub compaction: DatabaseCompactionProfile, + pub mode: Mode, + pub tracing: Switch, + pub vm_type: VMType, +} + +#[derive(Debug, PartialEq)] +pub struct ExportBlockchain { + pub spec: SpecType, + pub logger_config: LogConfig, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub file_path: Option, + pub format: Option, + pub pruning: Pruning, + pub compaction: DatabaseCompactionProfile, + pub mode: Mode, + pub tracing: Switch, + pub from_block: BlockID, + pub to_block: BlockID, +} + +pub fn execute(cmd: BlockchainCmd) -> Result { + match cmd { + BlockchainCmd::Import(import_cmd) => execute_import(import_cmd), + BlockchainCmd::Export(export_cmd) => execute_export(export_cmd), + } +} + +fn execute_import(cmd: ImportBlockchain) -> Result { + // Setup panic handler + let panic_handler = PanicHandler::new_in_arc(); + + // load spec file + let spec = try!(cmd.spec.spec()); + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // Setup logging + let _logger = setup_log(&cmd.logger_config); + + fdlimit::raise_fd_limit(); + + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); + + // prepare client_path + let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); + + // execute upgrades + try!(execute_upgrades(&cmd.dirs, genesis_hash, 
spec.fork_name.as_ref(), algorithm)); + + // prepare client config + let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.vm_type, "".into(), spec.fork_name.as_ref()); + + // build client + let service = try!(ClientService::start( + client_config, + spec, + Path::new(&client_path), + Arc::new(Miner::with_spec(try!(cmd.spec.spec()))), + ).map_err(|e| format!("Client service error: {:?}", e))); + + panic_handler.forward_from(&service); + let client = service.client(); + + let mut instream: Box = match cmd.file_path { + Some(f) => Box::new(try!(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f)))), + None => Box::new(io::stdin()), + }; + + const READAHEAD_BYTES: usize = 8; + + let mut first_bytes: Vec = vec![0; READAHEAD_BYTES]; + let mut first_read = 0; + + let format = match cmd.format { + Some(format) => format, + None => { + first_read = try!(instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream.")); + match first_bytes[0] { + 0xf9 => DataFormat::Binary, + _ => DataFormat::Hex, + } + } + }; + + let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color); + + let do_import = |bytes| { + while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } + match client.import_block(bytes) { + Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { + trace!("Skipping block already in chain."); + } + Err(e) => { + return Err(format!("Cannot import block: {:?}", e)); + }, + Ok(_) => {}, + } + informant.tick(); + Ok(()) + }; + + + match format { + DataFormat::Binary => { + loop { + let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]}; + let n = if first_read > 0 { + first_read + } else { + try!(instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream.")) + }; + if n == 0 { break; } + first_read = 0; + let s = try!(PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))).total(); + bytes.resize(s, 0); + try!(instream.read_exact(&mut bytes[READAHEAD_BYTES..]).map_err(|_| "Error reading from the file/stream.")); + try!(do_import(bytes)); + } + } + DataFormat::Hex => { + for line in BufReader::new(instream).lines() { + let s = try!(line.map_err(|_| "Error reading from the file/stream.")); + let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s}; + first_read = 0; + let bytes = try!(s.from_hex().map_err(|_| "Invalid hex in file/stream.")); + try!(do_import(bytes)); + } + } + } + client.flush_queue(); + + Ok("Import completed.".into()) +} + +fn execute_export(cmd: ExportBlockchain) -> Result { + // Setup panic handler + let panic_handler = PanicHandler::new_in_arc(); + + let format = cmd.format.unwrap_or_else(Default::default); + + // load spec file + let spec = try!(cmd.spec.spec()); + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // Setup logging + let _logger = setup_log(&cmd.logger_config); + + fdlimit::raise_fd_limit(); + + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); + + // prepare client_path + let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); + + // execute upgrades + try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm)); + + // prepare client config + let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, 
cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, VMType::default(), "".into(), spec.fork_name.as_ref()); + + let service = try!(ClientService::start( + client_config, + spec, + Path::new(&client_path), + Arc::new(Miner::with_spec(try!(cmd.spec.spec()))) + ).map_err(|e| format!("Client service error: {:?}", e))); + + panic_handler.forward_from(&service); + let client = service.client(); + + let mut out: Box<io::Write> = match cmd.file_path { + Some(f) => Box::new(try!(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f)))), + None => Box::new(io::stdout()), + }; + + let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found")); + let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found")); + + for i in from..(to + 1) { + let b = client.block(BlockID::Number(i)).unwrap(); + match format { + DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); } + DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); } + } + } + + Ok("Export completed.".into()) +} + +#[cfg(test)] +mod test { + use super::DataFormat; + + #[test] + fn test_data_format_parsing() { + assert_eq!(DataFormat::Binary, "binary".parse().unwrap()); + assert_eq!(DataFormat::Binary, "bin".parse().unwrap()); + assert_eq!(DataFormat::Hex, "hex".parse().unwrap()); + } +} diff --git a/parity/cache.rs b/parity/cache.rs new file mode 100644 index 000000000..45f1cb5f5 --- /dev/null +++ b/parity/cache.rs @@ -0,0 +1,109 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::cmp::max; + +const MIN_BC_CACHE_MB: u32 = 4; +const MIN_DB_CACHE_MB: u32 = 2; +const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16; +const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50; + +/// Configuration for application cache sizes. +/// All values are represented in MB. +#[derive(Debug, PartialEq)] +pub struct CacheConfig { + /// Size of database cache set using option `set_block_cache_size_mb` + /// 50% is blockchain + /// 25% is tracing + /// 25% is state + db: u32, + /// Size of blockchain cache. + blockchain: u32, + /// Size of transaction queue cache. + queue: u32, +} + +impl Default for CacheConfig { + fn default() -> Self { + CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB) + } +} + +impl CacheConfig { + /// Creates new cache config with cumulative size equal `total`. + pub fn new_with_total_cache_size(total: u32) -> Self { + CacheConfig { + db: total * 7 / 8, + blockchain: total / 8, + queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, + } + } + + /// Creates new cache config with given details. + pub fn new(db: u32, blockchain: u32, queue: u32) -> Self { + CacheConfig { + db: db, + blockchain: blockchain, + queue: queue, + } + } + + /// Size of db cache for blockchain.
+ pub fn db_blockchain_cache_size(&self) -> u32 { + max(MIN_DB_CACHE_MB, self.blockchain / 4) + } + + /// Size of db cache for state. + pub fn db_state_cache_size(&self) -> u32 { + max(MIN_DB_CACHE_MB, self.db * 3 / 4) + } + + /// Size of block queue size limit + pub fn queue(&self) -> u32 { + max(self.queue, MIN_BLOCK_QUEUE_SIZE_LIMIT_MB) + } + + /// Size of the blockchain cache. + pub fn blockchain(&self) -> u32 { + max(self.blockchain, MIN_BC_CACHE_MB) + } +} + +#[cfg(test)] +mod tests { + use super::CacheConfig; + + #[test] + fn test_cache_config_constructor() { + let config = CacheConfig::new_with_total_cache_size(200); + assert_eq!(config.db, 175); + assert_eq!(config.blockchain(), 25); + assert_eq!(config.queue(), 50); + } + + #[test] + fn test_cache_config_db_cache_sizes() { + let config = CacheConfig::new_with_total_cache_size(400); + assert_eq!(config.db, 350); + assert_eq!(config.db_blockchain_cache_size(), 12); + assert_eq!(config.db_state_cache_size(), 262); + } + + #[test] + fn test_cache_config_default() { + assert_eq!(CacheConfig::default(), CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)); + } +} diff --git a/parity/cli.rs b/parity/cli.rs index 60aca8310..11d58eb22 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . use util::version; +use docopt::Docopt; pub const USAGE: &'static str = r#" Parity. Ethereum Client. @@ -22,6 +23,8 @@ Parity. Ethereum Client. Copyright 2015, 2016 Ethcore (UK) Limited Usage: + parity [options] + parity ui [options] parity daemon <pid-file> [options] parity account (new | list ) [options] parity account import <path>... [options] @@ -29,8 +32,6 @@ Usage: parity import [ <file> ] [options] parity export [ <file> ] [options] parity signer new-token [options] - parity [options] - parity ui [options] Operating Options: --mode MODE Set the operating mode. MODE can be one of: @@ -105,8 +106,8 @@ API and Console Options: --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited list of API name. Possible name are web3, eth, net, personal, - ethcore, ethcore_set, traces. - [default: web3,eth,net,ethcore,personal,traces]. + ethcore, ethcore_set, traces, rpc. + [default: web3,eth,net,ethcore,personal,traces,rpc]. --jsonrpc-hosts HOSTS List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack @@ -201,18 +202,16 @@ Footprint Options: fast - maintain journal overlay. Fast but 50MB used. auto - use the method most recently synced or default to fast if none synced [default: auto]. - --cache-pref-size BYTES Specify the preferred size of the blockchain cache in - bytes [default: 16384]. - --cache-max-size BYTES Specify the maximum size of the blockchain cache in - bytes [default: 262144]. - --queue-max-size BYTES Specify the maximum size of memory to use for block - queue [default: 52428800]. - --cache MEGABYTES Set total amount of discretionary memory to use for + --cache-size-db MB Override database cache size [default: 64]. + --cache-size-blocks MB Specify the preferred size of the blockchain cache in + megabytes [default: 8]. + --cache-size-queue MB Specify the maximum size of memory to use for block + queue [default: 50]. + --cache-size MB Set total amount of discretionary memory to use for the entire system, overrides other cache and queue options. Database Options: - --db-cache-size MB Override RocksDB database cache size. --db-compaction TYPE Database compaction type.
TYPE may be one of: ssd - suitable for SSDs and fast HDDs; hdd - suitable for slow HDDs [default: ssd]. @@ -260,6 +259,7 @@ Legacy Options: --basic-tx-usd. --etherbase ADDRESS Equivalent to --author ADDRESS. --extradata STRING Equivalent to --extra-data STRING. + --cache MB Equivalent to --cache-size MB. Miscellaneous Options: -l --logging LOGGING Specify the logging level. Must conform to the same @@ -271,7 +271,7 @@ Miscellaneous Options: -h --help Show this screen. "#; -#[derive(Debug, RustcDecodable)] +#[derive(Debug, PartialEq, RustcDecodable)] pub struct Args { pub cmd_daemon: bool, pub cmd_account: bool, @@ -294,7 +294,6 @@ pub struct Args { pub flag_identity: String, pub flag_unlock: Option, pub flag_password: Vec, - pub flag_cache: Option, pub flag_keys_path: String, pub flag_keys_iterations: u32, pub flag_no_import_keys: bool, @@ -309,9 +308,13 @@ pub struct Args { pub flag_node_key: Option, pub flag_reserved_peers: Option, pub flag_reserved_only: bool, - pub flag_cache_pref_size: usize, - pub flag_cache_max_size: usize, - pub flag_queue_max_size: usize, + + pub flag_cache_size_db: u32, + pub flag_cache_size_blocks: u32, + pub flag_cache_size_queue: u32, + pub flag_cache_size: Option, + pub flag_cache: Option, + pub flag_no_jsonrpc: bool, pub flag_jsonrpc_interface: String, pub flag_jsonrpc_port: u16, @@ -380,13 +383,18 @@ pub struct Args { pub flag_dapps_off: bool, pub flag_ipcpath: Option, pub flag_ipcapi: Option, - pub flag_db_cache_size: Option, pub flag_db_compaction: String, pub flag_fat_db: bool, } -pub fn print_version() { - println!("\ +impl Default for Args { + fn default() -> Self { + Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap() + } +} + +pub fn print_version() -> String { + format!("\ Parity version {} Copyright 2015, 2016 Ethcore (UK) Limited @@ -395,6 +403,6 @@ This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. By Wood/Paronyan/Kotewicz/Drwięga/Volf.\ -", version()); +", version()) } diff --git a/parity/configuration.rs b/parity/configuration.rs index ce9b7d679..8ac7a2af0 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -14,57 +14,228 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::env; -use std::fs::File; use std::time::Duration; -use std::io::{BufRead, BufReader}; -use std::net::{SocketAddr, IpAddr}; +use std::io::Read; +use std::net::SocketAddr; use std::path::PathBuf; use cli::{USAGE, Args}; -use docopt::Docopt; - -use die::*; -use util::*; -use util::log::Colour::*; -use ethcore::account_provider::AccountProvider; +use docopt::{Docopt, Error as DocoptError}; +use util::{Hashable, NetworkConfiguration, U256, Uint, is_valid_node_url, Bytes, version_data, Secret, Address}; use util::network_settings::NetworkSettings; -use ethcore::client::{append_path, get_db_path, Mode, ClientConfig, DatabaseCompactionProfile, Switch, VMType}; -use ethcore::miner::{MinerOptions, PendingSet, GasPricer, GasPriceCalibratorOptions}; -use ethcore::ethereum; -use ethcore::spec::Spec; -use ethsync::SyncConfig; -use rpc::IpcConfiguration; -use ethcore_logger::Settings as LogSettings; +use util::log::Colour; +use ethcore::client::{VMType, Mode}; +use ethcore::miner::MinerOptions; -pub struct Configuration { - pub args: Args +use rpc::{IpcConfiguration, HttpConfiguration}; +use cache::CacheConfig; +use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, +geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address}; +use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras}; +use ethcore_logger::Config as LogConfig; +use dir::Directories; +use dapps::Configuration as DappsConfiguration; +use signer::Configuration as SignerConfiguration; +use run::RunCmd; +use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain}; +use presale::ImportWallet; +use account::{AccountCmd, NewAccount, ImportAccounts}; + +#[derive(Debug, PartialEq)] +pub enum Cmd { + Run(RunCmd), + Version, + Account(AccountCmd), + ImportPresaleWallet(ImportWallet), + Blockchain(BlockchainCmd), + SignerToken(String), } -pub struct Directories { - pub keys: String, - pub db: String, - pub dapps: String, - pub signer: String, +#[derive(Debug, PartialEq)] +pub struct Configuration { + pub args: Args, } impl Configuration { - pub fn parse() -> Self { - Configuration { - args: Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit()), + pub fn parse(command: I) -> Result where I: IntoIterator, S: AsRef { + let args = try!(Docopt::new(USAGE).and_then(|d| d.argv(command).decode())); + + let config = Configuration { + args: args, + }; + + Ok(config) + } + + pub fn into_command(self) -> Result { + let dirs = self.directories(); + let pruning = try!(self.args.flag_pruning.parse()); + let vm_type = try!(self.vm_type()); + let mode = try!(to_mode(&self.args.flag_mode, self.args.flag_mode_timeout, self.args.flag_mode_alarm)); + let miner_options = try!(self.miner_options()); + let logger_config = self.logger_config(); + let http_conf = try!(self.http_config()); + let ipc_conf = try!(self.ipc_config()); + let net_conf = try!(self.net_config()); + let network_id = try!(self.network_id()); + let cache_config = self.cache_config(); + let spec = try!(self.chain().parse()); + let tracing = try!(self.args.flag_tracing.parse()); + let compaction = try!(self.args.flag_db_compaction.parse()); + let enable_network = self.enable_network(&mode); + let geth_compatibility = self.args.flag_geth; + let signer_port = self.signer_port(); + let dapps_conf = self.dapps_config(); + let signer_conf = self.signer_config(); + + let cmd = if self.args.flag_version { + Cmd::Version + } else if self.args.cmd_signer { + Cmd::SignerToken(dirs.signer) + } else if 
self.args.cmd_account { + let account_cmd = if self.args.cmd_new { + let new_acc = NewAccount { + iterations: self.args.flag_keys_iterations, + path: dirs.keys, + password_file: self.args.flag_password.first().cloned(), + }; + AccountCmd::New(new_acc) + } else if self.args.cmd_list { + AccountCmd::List(dirs.keys) + } else if self.args.cmd_import { + let import_acc = ImportAccounts { + from: self.args.arg_path.clone(), + to: dirs.keys, + }; + AccountCmd::Import(import_acc) + } else { + unreachable!(); + }; + Cmd::Account(account_cmd) + } else if self.args.cmd_wallet { + let presale_cmd = ImportWallet { + iterations: self.args.flag_keys_iterations, + path: dirs.keys, + wallet_path: self.args.arg_path.first().unwrap().clone(), + password_file: self.args.flag_password.first().cloned(), + }; + Cmd::ImportPresaleWallet(presale_cmd) + } else if self.args.cmd_import { + let import_cmd = ImportBlockchain { + spec: spec, + logger_config: logger_config, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_file.clone(), + format: None, + pruning: pruning, + compaction: compaction, + mode: mode, + tracing: tracing, + vm_type: vm_type, + }; + Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) + } else if self.args.cmd_export { + let export_cmd = ExportBlockchain { + spec: spec, + logger_config: logger_config, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_file.clone(), + format: None, + pruning: pruning, + compaction: compaction, + mode: mode, + tracing: tracing, + from_block: try!(to_block_id(&self.args.flag_from)), + to_block: try!(to_block_id(&self.args.flag_to)), + }; + Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) + } else { + let daemon = if self.args.cmd_daemon { + Some(self.args.arg_pid_file.clone()) + } else { + None + }; + + let run_cmd = RunCmd { + cache_config: cache_config, + dirs: dirs, + spec: spec, + pruning: pruning, + daemon: daemon, + logger_config: logger_config, + miner_options: miner_options, + http_conf: http_conf, + ipc_conf: ipc_conf, + net_conf: net_conf, + network_id: network_id, + acc_conf: try!(self.accounts_config()), + gas_pricer: try!(self.gas_pricer_config()), + miner_extras: try!(self.miner_extras()), + mode: mode, + tracing: tracing, + compaction: compaction, + vm_type: vm_type, + enable_network: enable_network, + geth_compatibility: geth_compatibility, + signer_port: signer_port, + net_settings: self.network_settings(), + dapps_conf: dapps_conf, + signer_conf: signer_conf, + ui: self.args.cmd_ui, + name: self.args.flag_identity, + custom_bootnodes: self.args.flag_bootnodes.is_some(), + }; + Cmd::Run(run_cmd) + }; + + Ok(cmd) + } + + fn enable_network(&self, mode: &Mode) -> bool { + match *mode { + Mode::Dark(_) => false, + _ => !self.args.flag_no_network, } } - pub fn mode(&self) -> Mode { - match &(self.args.flag_mode[..]) { - "active" => Mode::Active, - "passive" => Mode::Passive(Duration::from_secs(self.args.flag_mode_timeout), Duration::from_secs(self.args.flag_mode_alarm)), - "dark" => Mode::Dark(Duration::from_secs(self.args.flag_mode_timeout)), - _ => die!("{}: Invalid address for --mode. 
Must be one of active, passive or dark.", self.args.flag_mode), + fn vm_type(&self) -> Result { + if self.args.flag_jitvm { + VMType::jit().ok_or("Parity is built without the JIT EVM.".into()) + } else { + Ok(VMType::Interpreter) } } - fn net_port(&self) -> u16 { - self.args.flag_port + fn miner_extras(&self) -> Result { + let extras = MinerExtras { + author: try!(self.author()), + extra_data: try!(self.extra_data()), + gas_floor_target: try!(to_u256(&self.args.flag_gas_floor_target)), + gas_ceil_target: try!(to_u256(&self.args.flag_gas_cap)), + transactions_limit: self.args.flag_tx_queue_size, + }; + + Ok(extras) + } + + fn author(&self) -> Result { + to_address(self.args.flag_etherbase.clone().or(self.args.flag_author.clone())) + } + + fn cache_config(&self) -> CacheConfig { + match self.args.flag_cache_size.or(self.args.flag_cache) { + Some(size) => CacheConfig::new_with_total_cache_size(size), + None => CacheConfig::new(self.args.flag_cache_size_db, self.args.flag_cache_size_blocks, self.args.flag_cache_size_queue), + } + } + + fn logger_config(&self) -> LogConfig { + LogConfig { + mode: self.args.flag_logging.clone(), + color: !self.args.flag_no_color && !cfg!(windows), + file: self.args.flag_log_file.clone(), + } } fn chain(&self) -> String { @@ -79,358 +250,168 @@ impl Configuration { self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32 } - fn decode_u256(d: &str, argument: &str) -> U256 { - U256::from_dec_str(d).unwrap_or_else(|_| - U256::from_str(clean_0x(d)).unwrap_or_else(|_| - die!("{}: Invalid numeric value for {}. Must be either a decimal or a hex number.", d, argument) - ) - ) - } - fn work_notify(&self) -> Vec { self.args.flag_notify_work.as_ref().map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect()) } - pub fn miner_options(&self) -> MinerOptions { - let (own, ext) = match self.args.flag_reseal_on_txs.as_str() { - "none" => (false, false), - "own" => (true, false), - "ext" => (false, true), - "all" => (true, true), - x => die!("{}: Invalid value for --reseal option. Use --help for more information.", x) + fn accounts_config(&self) -> Result { + let cfg = AccountsConfig { + iterations: self.args.flag_keys_iterations, + import_keys: !self.args.flag_no_import_keys, + testnet: self.args.flag_testnet, + password_files: self.args.flag_password.clone(), + unlocked_accounts: try!(to_addresses(&self.args.flag_unlock)), }; - MinerOptions { + + Ok(cfg) + } + + fn miner_options(&self) -> Result { + let reseal = try!(self.args.flag_reseal_on_txs.parse::()); + + let options = MinerOptions { new_work_notify: self.work_notify(), force_sealing: self.args.flag_force_sealing, - reseal_on_external_tx: ext, - reseal_on_own_tx: own, - tx_gas_limit: self.args.flag_tx_gas_limit.as_ref().map_or(!U256::zero(), |d| Self::decode_u256(d, "--tx-gas-limit")), - tx_queue_size: self.args.flag_tx_queue_size, - pending_set: match self.args.flag_relay_set.as_str() { - "cheap" => PendingSet::AlwaysQueue, - "strict" => PendingSet::AlwaysSealing, - "lenient" => PendingSet::SealingOrElseQueue, - x => die!("{}: Invalid value for --relay-set option. 
Use --help for more information.", x) + reseal_on_external_tx: reseal.external, + reseal_on_own_tx: reseal.own, + tx_gas_limit: match self.args.flag_tx_gas_limit { + Some(ref d) => try!(to_u256(d)), + None => U256::max_value(), }, + tx_queue_size: self.args.flag_tx_queue_size, + pending_set: try!(to_pending_set(&self.args.flag_relay_set)), reseal_min_period: Duration::from_millis(self.args.flag_reseal_min_period), work_queue_size: self.args.flag_work_queue_size, enable_resubmission: !self.args.flag_remove_solved, - } - } - - pub fn author(&self) -> Option

{ - self.args.flag_etherbase.as_ref() - .or(self.args.flag_author.as_ref()) - .map(|d| Address::from_str(clean_0x(d)).unwrap_or_else(|_| { - die!("{}: Invalid address for --author. Must be 40 hex characters, with or without the 0x at the beginning.", d) - })) - } - - pub fn gas_floor_target(&self) -> U256 { - let d = &self.args.flag_gas_floor_target; - U256::from_dec_str(d).unwrap_or_else(|_| { - die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d) - }) - } - - pub fn gas_ceil_target(&self) -> U256 { - let d = &self.args.flag_gas_cap; - U256::from_dec_str(d).unwrap_or_else(|_| { - die!("{}: Invalid target gas ceiling given. Must be a decimal unsigned 256-bit number.", d) - }) - } - - fn to_duration(s: &str) -> Duration { - let bad = |_| { - die!("{}: Invalid duration given. See parity --help for more information.", s) }; - Duration::from_secs(match s { - "twice-daily" => 12 * 60 * 60, - "half-hourly" => 30 * 60, - "1second" | "1 second" | "second" => 1, - "1minute" | "1 minute" | "minute" => 60, - "hourly" | "1hour" | "1 hour" | "hour" => 60 * 60, - "daily" | "1day" | "1 day" | "day" => 24 * 60 * 60, - x if x.ends_with("seconds") => FromStr::from_str(&x[0..x.len() - 7]).unwrap_or_else(bad), - x if x.ends_with("minutes") => FromStr::from_str(&x[0..x.len() - 7]).unwrap_or_else(bad) * 60, - x if x.ends_with("hours") => FromStr::from_str(&x[0..x.len() - 5]).unwrap_or_else(bad) * 60 * 60, - x if x.ends_with("days") => FromStr::from_str(&x[0..x.len() - 4]).unwrap_or_else(bad) * 24 * 60 * 60, - x => FromStr::from_str(x).unwrap_or_else(bad), - }) + + Ok(options) } - pub fn gas_pricer(&self) -> GasPricer { - match self.args.flag_gasprice.as_ref() { - Some(d) => { - GasPricer::Fixed(U256::from_dec_str(d).unwrap_or_else(|_| { - die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d) - })) - } - _ => { - let usd_per_tx: f32 = FromStr::from_str(&self.args.flag_usd_per_tx).unwrap_or_else(|_| { - die!("{}: Invalid basic transaction price given in USD. Must be a decimal number.", self.args.flag_usd_per_tx) - }); - match self.args.flag_usd_per_eth.as_str() { - "auto" => { - GasPricer::new_calibrated(GasPriceCalibratorOptions { - usd_per_tx: usd_per_tx, - recalibration_period: Self::to_duration(self.args.flag_price_update_period.as_str()), - }) - }, - x => { - let usd_per_eth: f32 = FromStr::from_str(x).unwrap_or_else(|_| die!("{}: Invalid ether price given in USD. 
Must be a decimal number.", x)); - let wei_per_usd: f32 = 1.0e18 / usd_per_eth; - let gas_per_tx: f32 = 21000.0; - let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; - info!("Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)", White.bold().paint(format!("US${}", usd_per_eth)), Yellow.bold().paint(format!("{}", wei_per_gas))); - GasPricer::Fixed(U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap()) - } - } - } + fn signer_config(&self) -> SignerConfiguration { + SignerConfiguration { + enabled: self.signer_enabled(), + port: self.args.flag_signer_port, + signer_path: self.directories().signer, } } - pub fn extra_data(&self) -> Bytes { + fn dapps_config(&self) -> DappsConfiguration { + DappsConfiguration { + enabled: self.dapps_enabled(), + interface: self.dapps_interface(), + port: self.args.flag_dapps_port, + user: self.args.flag_dapps_user.clone(), + pass: self.args.flag_dapps_pass.clone(), + dapps_path: self.directories().dapps, + } + } + + fn gas_pricer_config(&self) -> Result { + if let Some(d) = self.args.flag_gasprice.as_ref() { + return Ok(GasPricerConfig::Fixed(try!(to_u256(d)))); + } + + let usd_per_tx = try!(to_price(&self.args.flag_usd_per_tx)); + if "auto" == self.args.flag_usd_per_eth.as_str() { + return Ok(GasPricerConfig::Calibrated { + usd_per_tx: usd_per_tx, + recalibration_period: try!(to_duration(self.args.flag_price_update_period.as_str())), + }); + } + + let usd_per_eth = try!(to_price(&self.args.flag_usd_per_eth)); + let wei_per_usd: f32 = 1.0e18 / usd_per_eth; + let gas_per_tx: f32 = 21000.0; + let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; + + info!( + "Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)", + Colour::White.bold().paint(format!("US${}", usd_per_eth)), + Colour::Yellow.bold().paint(format!("{}", wei_per_gas)) + ); + + Ok(GasPricerConfig::Fixed(U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap())) + } + + fn extra_data(&self) -> Result { match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) { - Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(), - None => version_data(), - Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); } + Some(ref x) if x.len() <= 32 => Ok(x.as_bytes().to_owned()), + None => Ok(version_data()), + Some(_) => Err("Extra data must be at most 32 characters".into()), } } - pub fn spec(&self) -> Spec { - match self.chain().as_str() { - "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(), - "frontier-dogmatic" | "homestead-dogmatic" | "classic" => ethereum::new_classic(), - "morden" | "testnet" => ethereum::new_morden(), - "olympic" => ethereum::new_olympic(), - f => Spec::load(contents(f).unwrap_or_else(|_| { - die!("{}: Couldn't read chain specification file. 
Sure it exists?", f) - }).as_ref()), - } - } - - pub fn normalize_enode(e: &str) -> Option { - if is_valid_node_url(e) { - Some(e.to_owned()) - } else { - None - } - } - - pub fn init_nodes(&self, spec: &Spec) -> Vec { - match self.args.flag_bootnodes { - Some(ref x) if !x.is_empty() => x.split(',').map(|s| { - Self::normalize_enode(s).unwrap_or_else(|| { - die!("{}: Invalid node address format given for a boot node.", s) - }) - }).collect(), - Some(_) => Vec::new(), - None => spec.nodes().to_owned(), - } - } - - pub fn init_reserved_nodes(&self) -> Vec { + fn init_reserved_nodes(&self) -> Result, String> { use std::fs::File; - if let Some(ref path) = self.args.flag_reserved_peers { - let mut buffer = String::new(); - let mut node_file = File::open(path).unwrap_or_else(|e| { - die!("Error opening reserved nodes file: {}", e); - }); - node_file.read_to_string(&mut buffer).expect("Error reading reserved node file"); - buffer.lines().map(|s| { - Self::normalize_enode(s).unwrap_or_else(|| { - die!("{}: Invalid node address format given for a reserved node.", s); - }) - }).collect() - } else { - Vec::new() + match self.args.flag_reserved_peers { + Some(ref path) => { + let mut buffer = String::new(); + let mut node_file = try!(File::open(path).map_err(|e| format!("Error opening reserved nodes file: {}", e))); + try!(node_file.read_to_string(&mut buffer).map_err(|_| "Error reading reserved node file")); + if let Some(invalid) = buffer.lines().find(|s| !is_valid_node_url(s)) { + Err(format!("Invalid node address format given for a boot node: {}", invalid)) + } else { + Ok(buffer.lines().map(|s| s.to_owned()).collect()) + } + }, + None => Ok(Vec::new()) } } - pub fn net_addresses(&self) -> (Option, Option) { - let port = self.net_port(); - let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), port)); + fn net_addresses(&self) -> Result<(Option, Option), String> { + let port = self.args.flag_port; + let listen_address = Some(SocketAddr::new("0.0.0.0".parse().unwrap(), port)); let public_address = if self.args.flag_nat.starts_with("extip:") { let host = &self.args.flag_nat[6..]; - let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host)); + let host = try!(host.parse().map_err(|_| format!("Invalid host given with `--nat extip:{}`", host))); Some(SocketAddr::new(host, port)) } else { None }; - (listen_address, public_address) + Ok((listen_address, public_address)) } - pub fn net_settings(&self, spec: &Spec) -> NetworkConfiguration { + fn net_config(&self) -> Result { let mut ret = NetworkConfiguration::new(); ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp"; - ret.boot_nodes = self.init_nodes(spec); - let (listen, public) = self.net_addresses(); + ret.boot_nodes = try!(to_bootnodes(&self.args.flag_bootnodes)); + let (listen, public) = try!(self.net_addresses()); ret.listen_address = listen; ret.public_address = public; - ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(s).unwrap_or_else(|_| s.sha3())); + ret.use_secret = self.args.flag_node_key.as_ref().map(|s| s.parse::().unwrap_or_else(|_| s.sha3())); ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover; ret.ideal_peers = self.max_peers(); - let mut net_path = PathBuf::from(&self.path()); + let mut net_path = PathBuf::from(self.directories().db); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); - ret.reserved_nodes = self.init_reserved_nodes(); + 
ret.reserved_nodes = try!(self.init_reserved_nodes()); if self.args.flag_reserved_only { ret.non_reserved_mode = ::util::network::NonReservedPeerMode::Deny; } - ret + Ok(ret) } - fn find_best_db(&self, spec: &Spec) -> Option { - let mut ret = None; - let mut latest_era = None; - let jdb_types = [journaldb::Algorithm::Archive, journaldb::Algorithm::EarlyMerge, journaldb::Algorithm::OverlayRecent, journaldb::Algorithm::RefCounted]; - for i in jdb_types.into_iter() { - let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash(), spec.fork_name.as_ref()), "state"), *i, kvdb::DatabaseConfig::default()); - trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era()); - match (latest_era, db.latest_era()) { - (Some(best), Some(this)) if best >= this => {} - (_, None) => {} - (_, Some(this)) => { - latest_era = Some(this); - ret = Some(*i); - } - } - } - ret - } - - pub fn pruning_algorithm(&self, spec: &Spec) -> journaldb::Algorithm { - match self.args.flag_pruning.as_str() { - "archive" => journaldb::Algorithm::Archive, - "light" => journaldb::Algorithm::EarlyMerge, - "fast" => journaldb::Algorithm::OverlayRecent, - "basic" => journaldb::Algorithm::RefCounted, - "auto" => self.find_best_db(spec).unwrap_or(journaldb::Algorithm::OverlayRecent), - _ => { die!("Invalid pruning method given."); } + fn network_id(&self) -> Result, String> { + let net_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()); + match net_id { + Some(id) => Ok(Some(try!(to_u256(id)))), + None => Ok(None), } } - pub fn client_config(&self, spec: &Spec) -> ClientConfig { - let mut client_config = ClientConfig::default(); - - client_config.mode = self.mode(); - - match self.args.flag_cache { - Some(mb) => { - client_config.blockchain.max_cache_size = mb * 1024 * 1024; - client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size * 3 / 4; - } - None => { - client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; - client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; - } - } - // forced blockchain (blocks + extras) db cache size if provided - client_config.blockchain.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 2)); - - client_config.tracing.enabled = match self.args.flag_tracing.as_str() { - "auto" => Switch::Auto, - "on" => Switch::On, - "off" => Switch::Off, - _ => { die!("Invalid tracing method given!") } - }; - // forced trace db cache size if provided - client_config.tracing.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4)); - - client_config.pruning = self.pruning_algorithm(spec); - - if self.args.flag_fat_db { - if let journaldb::Algorithm::Archive = client_config.pruning { - client_config.trie_spec = TrieSpec::Fat; - } else { - die!("Fatdb is not supported. 
Please re-run with --pruning=archive") - } - } - - // forced state db cache size if provided - client_config.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4)); - - // compaction profile - client_config.db_compaction = match self.args.flag_db_compaction.as_str() { - "ssd" => DatabaseCompactionProfile::Default, - "hdd" => DatabaseCompactionProfile::HDD, - _ => { die!("Invalid compaction profile given (--db-compaction argument), expected hdd/ssd (default)."); } - }; - - if self.args.flag_jitvm { - client_config.vm_type = VMType::jit().unwrap_or_else(|| die!("Parity is built without the JIT EVM.")) - } - - trace!(target: "parity", "Using pruning strategy of {}", client_config.pruning); - client_config.name = self.args.flag_identity.clone(); - client_config.queue.max_mem_use = self.args.flag_queue_max_size; - client_config - } - - pub fn sync_config(&self, spec: &Spec) -> SyncConfig { - let mut sync_config = SyncConfig::default(); - sync_config.network_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()).map_or(spec.network_id(), |id| { - U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --network-id/--networkid", id)) - }); - sync_config - } - - pub fn account_service(&self) -> AccountProvider { - use ethcore::ethstore::{import_accounts, EthStore}; - use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory}; - - // Secret Store - let passwords = self.args.flag_password.iter().flat_map(|filename| { - BufReader::new(&File::open(filename).unwrap_or_else(|_| die!("{} Unable to read password file. Ensure it exists and permissions are correct.", filename))) - .lines() - .map(|l| l.unwrap()) - .collect::>() - .into_iter() - }).collect::>(); - - if !self.args.flag_no_import_keys { - let dir_type = if self.args.flag_testnet { - DirectoryType::Testnet - } else { - DirectoryType::Main - }; - - let from = GethDirectory::open(dir_type); - let to = DiskDirectory::create(self.keys_path()).unwrap(); - // ignore error, cause geth may not exist - let _ = import_accounts(&from, &to); - } - - let dir = Box::new(DiskDirectory::create(self.keys_path()).unwrap()); - let iterations = self.keys_iterations(); - let account_service = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap())); - - if let Some(ref unlocks) = self.args.flag_unlock { - for d in unlocks.split(',') { - let a = Address::from_str(clean_0x(d)).unwrap_or_else(|_| { - die!("{}: Invalid address for --unlock. Must be 40 hex characters, without the 0x at the beginning.", d) - }); - if passwords.iter().find(|p| account_service.unlock_account_permanently(a, (*p).clone()).is_ok()).is_none() { - die!("No password given to unlock account {}. 
Pass the password using `--password`.", a); - } - } - } - account_service - } - - pub fn rpc_apis(&self) -> String { + fn rpc_apis(&self) -> String { self.args.flag_rpcapi.clone().unwrap_or(self.args.flag_jsonrpc_apis.clone()) } - pub fn rpc_cors(&self) -> Option> { + fn rpc_cors(&self) -> Option> { let cors = self.args.flag_jsonrpc_cors.clone().or(self.args.flag_rpccorsdomain.clone()); cors.map(|c| c.split(',').map(|s| s.to_owned()).collect()) } - pub fn rpc_hosts(&self) -> Option> { + fn rpc_hosts(&self) -> Option> { match self.args.flag_jsonrpc_hosts.as_ref() { "none" => return Some(Vec::new()), "all" => return None, @@ -440,65 +421,54 @@ impl Configuration { Some(hosts) } - fn geth_ipc_path(&self) -> String { - if cfg!(windows) { - r"\\.\pipe\geth.ipc".to_owned() - } else { - match self.args.flag_testnet { - true => path::ethereum::with_testnet("geth.ipc"), - false => path::ethereum::with_default("geth.ipc"), - }.to_str().unwrap().to_owned() - } - } - - pub fn keys_iterations(&self) -> u32 { - self.args.flag_keys_iterations - } - - pub fn ipc_settings(&self) -> IpcConfiguration { - IpcConfiguration { + fn ipc_config(&self) -> Result { + let conf = IpcConfiguration { enabled: !(self.args.flag_ipcdisable || self.args.flag_ipc_off || self.args.flag_no_ipc), socket_addr: self.ipc_path(), - apis: self.args.flag_ipcapi.clone().unwrap_or(self.args.flag_ipc_apis.clone()), - } + apis: try!(self.args.flag_ipcapi.clone().unwrap_or(self.args.flag_ipc_apis.clone()).parse()), + }; + + Ok(conf) } - pub fn network_settings(&self) -> NetworkSettings { - if self.args.flag_jsonrpc { println!("WARNING: Flag -j/--json-rpc is deprecated. JSON-RPC is now on by default. Ignoring."); } + fn http_config(&self) -> Result { + let conf = HttpConfiguration { + enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + interface: self.rpc_interface(), + port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), + apis: try!(self.rpc_apis().parse()), + hosts: self.rpc_hosts(), + cors: self.rpc_cors(), + }; + + Ok(conf) + } + + fn network_settings(&self) -> NetworkSettings { NetworkSettings { name: self.args.flag_identity.clone(), chain: self.chain(), max_peers: self.max_peers(), - network_port: self.net_port(), + network_port: self.args.flag_port, rpc_enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()), rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), } } - pub fn directories(&self) -> Directories { - let db_path = Configuration::replace_home( - self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path)); - ::std::fs::create_dir_all(&db_path).unwrap_or_else(|e| die_with_io_error("main", e)); + fn directories(&self) -> Directories { + let db_path = replace_home(self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path)); - let keys_path = Configuration::replace_home( + let keys_path = replace_home( if self.args.flag_testnet { "$HOME/.parity/testnet_keys" } else { &self.args.flag_keys_path } ); - ::std::fs::create_dir_all(&keys_path).unwrap_or_else(|e| die_with_io_error("main", e)); - let dapps_path = Configuration::replace_home(&self.args.flag_dapps_path); - ::std::fs::create_dir_all(&dapps_path).unwrap_or_else(|e| die_with_io_error("main", e)); - let signer_path = Configuration::replace_home(&self.args.flag_signer_path); - ::std::fs::create_dir_all(&signer_path).unwrap_or_else(|e| die_with_io_error("main", e)); - if self.args.flag_geth { - let 
geth_path = path::ethereum::default(); - ::std::fs::create_dir_all(geth_path.as_path()).unwrap_or_else( - |e| die!("Error while attempting to create '{}' for geth mode: {}", &geth_path.to_str().unwrap(), e)); - } + let dapps_path = replace_home(&self.args.flag_dapps_path); + let signer_path = replace_home(&self.args.flag_signer_path); Directories { keys: keys_path, @@ -508,33 +478,15 @@ impl Configuration { } } - pub fn keys_path(&self) -> String { - self.directories().keys - } - - pub fn path(&self) -> String { - self.directories().db - } - - fn replace_home(arg: &str) -> String { - arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) - } - fn ipc_path(&self) -> String { if self.args.flag_geth { - self.geth_ipc_path() - } else if cfg!(windows) { - r"\\.\pipe\parity.jsonrpc".to_owned() + geth_ipc_path(self.args.flag_testnet) } else { - Configuration::replace_home(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone())) + parity_ipc_path(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone())) } } - pub fn have_color(&self) -> bool { - !self.args.flag_no_color && !cfg!(windows) - } - - pub fn signer_port(&self) -> Option { + fn signer_port(&self) -> Option { if !self.signer_enabled() { None } else { @@ -542,7 +494,7 @@ impl Configuration { } } - pub fn rpc_interface(&self) -> String { + fn rpc_interface(&self) -> String { match self.network_settings().rpc_interface.as_str() { "all" => "0.0.0.0", "local" => "127.0.0.1", @@ -550,18 +502,18 @@ impl Configuration { }.into() } - pub fn dapps_interface(&self) -> String { + fn dapps_interface(&self) -> String { match self.args.flag_dapps_interface.as_str() { "local" => "127.0.0.1", x => x, }.into() } - pub fn dapps_enabled(&self) -> bool { + fn dapps_enabled(&self) -> bool { !self.args.flag_dapps_off && !self.args.flag_no_dapps && cfg!(feature = "dapps") } - pub fn signer_enabled(&self) -> bool { + fn signer_enabled(&self) -> bool { if self.args.flag_force_signer { return true; } @@ -572,20 +524,6 @@ impl Configuration { return !signer_disabled; } - - pub fn log_settings(&self) -> LogSettings { - let mut settings = LogSettings::new(); - if self.args.flag_no_color || cfg!(windows) { - settings = settings.no_color(); - } - if let Some(ref init) = self.args.flag_logging { - settings = settings.init(init.to_owned()) - } - if let Some(ref file) = self.args.flag_log_file { - settings = settings.file(file.to_owned()) - } - settings - } } #[cfg(test)] @@ -594,6 +532,15 @@ mod tests { use cli::USAGE; use docopt::Docopt; use util::network_settings::NetworkSettings; + use ethcore::client::{VMType, BlockID}; + use helpers::{replace_home, default_network_config}; + use run::RunCmd; + use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain}; + use presale::ImportWallet; + use account::{AccountCmd, NewAccount, ImportAccounts}; + + #[derive(Debug, PartialEq)] + struct TestPasswordReader(&'static str); fn parse(args: &[&str]) -> Configuration { Configuration { @@ -601,6 +548,137 @@ mod tests { } } + #[test] + fn test_command_version() { + let args = vec!["parity", "--version"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Version); + } + + #[test] + fn test_command_account_new() { + let args = vec!["parity", "account", "new"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::New(NewAccount { + iterations: 10240, + path: replace_home("$HOME/.parity/keys"), + password_file: None, + 
}))); + } + + #[test] + fn test_command_account_list() { + let args = vec!["parity", "account", "list"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Account( + AccountCmd::List(replace_home("$HOME/.parity/keys"))) + ); + } + + #[test] + fn test_command_account_import() { + let args = vec!["parity", "account", "import", "my_dir", "another_dir"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::Import(ImportAccounts { + from: vec!["my_dir".into(), "another_dir".into()], + to: replace_home("$HOME/.parity/keys"), + }))); + } + + #[test] + fn test_command_wallet_import() { + let args = vec!["parity", "wallet", "import", "my_wallet.json", "--password", "pwd"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::ImportPresaleWallet(ImportWallet { + iterations: 10240, + path: replace_home("$HOME/.parity/keys"), + wallet_path: "my_wallet.json".into(), + password_file: Some("pwd".into()), + })); + } + + #[test] + fn test_command_blockchain_import() { + let args = vec!["parity", "import", "blockchain.json"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain { + spec: Default::default(), + logger_config: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("blockchain.json".into()), + format: None, + pruning: Default::default(), + compaction: Default::default(), + mode: Default::default(), + tracing: Default::default(), + vm_type: VMType::Interpreter, + }))); + } + + #[test] + fn test_command_blockchain_export() { + let args = vec!["parity", "export", "blockchain.json"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { + spec: Default::default(), + logger_config: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("blockchain.json".into()), + pruning: Default::default(), + format: Default::default(), + compaction: Default::default(), + mode: Default::default(), + tracing: Default::default(), + from_block: BlockID::Number(1), + to_block: BlockID::Latest, + }))); + } + + #[test] + fn test_command_signer_new_token() { + let args = vec!["parity", "signer", "new-token"]; + let conf = Configuration::parse(args).unwrap(); + let expected = replace_home("$HOME/.parity/signer"); + assert_eq!(conf.into_command().unwrap(), Cmd::SignerToken(expected)); + } + + #[test] + fn test_run_cmd() { + let args = vec!["parity"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Run(RunCmd { + cache_config: Default::default(), + dirs: Default::default(), + spec: Default::default(), + pruning: Default::default(), + daemon: None, + logger_config: Default::default(), + miner_options: Default::default(), + http_conf: Default::default(), + ipc_conf: Default::default(), + net_conf: default_network_config(), + network_id: None, + acc_conf: Default::default(), + gas_pricer: Default::default(), + miner_extras: Default::default(), + mode: Default::default(), + tracing: Default::default(), + compaction: Default::default(), + vm_type: Default::default(), + enable_network: true, + geth_compatibility: false, + signer_port: Some(8180), + net_settings: Default::default(), + dapps_conf: Default::default(), + signer_conf: 
Default::default(), + ui: false, + name: "".into(), + custom_bootnodes: false, + })); + } + #[test] fn should_parse_network_settings() { // given diff --git a/parity/dapps.rs b/parity/dapps.rs index 917c59fc6..9fb01a30a 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -15,17 +15,17 @@ // along with Parity. If not, see . use std::sync::Arc; -use std::str::FromStr; use std::net::SocketAddr; use util::panics::PanicHandler; -use die::*; use rpc_apis; +use helpers::replace_home; #[cfg(feature = "dapps")] pub use ethcore_dapps::Server as WebappServer; #[cfg(not(feature = "dapps"))] pub struct WebappServer; +#[derive(Debug, PartialEq, Clone)] pub struct Configuration { pub enabled: bool, pub interface: String, @@ -35,18 +35,31 @@ pub struct Configuration { pub dapps_path: String, } +impl Default for Configuration { + fn default() -> Self { + Configuration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8080, + user: None, + pass: None, + dapps_path: replace_home("$HOME/.parity/dapps"), + } + } +} + pub struct Dependencies { pub panic_handler: Arc, pub apis: Arc, } -pub fn new(configuration: Configuration, deps: Dependencies) -> Option { +pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { if !configuration.enabled { - return None; + return Ok(None); } let url = format!("{}:{}", configuration.interface, configuration.port); - let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid Webapps listen host/port given.", url)); + let addr = try!(url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url))); let auth = configuration.user.as_ref().map(|username| { let password = configuration.pass.as_ref().map_or_else(|| { @@ -59,7 +72,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Option, -) -> ! { - die!("Your Parity version has been compiled without WebApps support.") +) -> Result { + Err("Your Parity version has been compiled without WebApps support.".into()) } #[cfg(feature = "dapps")] @@ -78,7 +91,7 @@ pub fn setup_dapps_server( dapps_path: String, url: &SocketAddr, auth: Option<(String, String)> -) -> WebappServer { +) -> Result { use ethcore_dapps as dapps; let server = dapps::ServerBuilder::new(dapps_path); @@ -93,15 +106,14 @@ pub fn setup_dapps_server( }; match start_result { - Err(dapps::ServerError::IoError(err)) => die_with_io_error("WebApps", err), - Err(e) => die!("WebApps: {:?}", e), + Err(dapps::ServerError::IoError(err)) => Err(format!("WebApps io error: {}", err)), + Err(e) => Err(format!("WebApps error: {:?}", e)), Ok(server) => { server.set_panic_handler(move || { deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned()); }); - server + Ok(server) }, } - } diff --git a/parity/deprecated.rs b/parity/deprecated.rs new file mode 100644 index 000000000..5d3a74913 --- /dev/null +++ b/parity/deprecated.rs @@ -0,0 +1,148 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::fmt; +use cli::Args; + +#[derive(Debug, PartialEq)] +pub enum Deprecated { + DoesNothing(&'static str), + Replaced(&'static str, &'static str), +} + +impl fmt::Display for Deprecated { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s), + Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new), + } + } +} + +impl Deprecated { + fn jsonrpc() -> Self { + Deprecated::DoesNothing("--jsonrpc") + } + + fn rpc() -> Self { + Deprecated::DoesNothing("--rpc") + } + + fn jsonrpc_off() -> Self { + Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc") + } + + fn webapp() -> Self { + Deprecated::DoesNothing("--webapp") + } + + fn dapps_off() -> Self { + Deprecated::Replaced("--dapps-off", "--no-daps") + } + + fn ipcdisable() -> Self { + Deprecated::Replaced("--ipcdisable", "--no-ipc") + } + + fn ipc_off() -> Self { + Deprecated::Replaced("--ipc-off", "--no-ipc") + } + + fn etherbase() -> Self { + Deprecated::Replaced("--etherbase", "--author") + } + + fn extradata() -> Self { + Deprecated::Replaced("--extradata", "--extra-data") + } +} + +pub fn find_deprecated(args: &Args) -> Vec { + let mut result = vec![]; + + if args.flag_jsonrpc { + result.push(Deprecated::jsonrpc()); + } + + if args.flag_rpc { + result.push(Deprecated::rpc()); + } + + if args.flag_jsonrpc_off { + result.push(Deprecated::jsonrpc_off()); + } + + if args.flag_webapp { + result.push(Deprecated::webapp()) + } + + if args.flag_dapps_off { + result.push(Deprecated::dapps_off()); + } + + if args.flag_ipcdisable { + result.push(Deprecated::ipcdisable()); + } + + if args.flag_ipc_off { + result.push(Deprecated::ipc_off()); + } + + if args.flag_etherbase.is_some() { + result.push(Deprecated::etherbase()); + } + + if args.flag_extradata.is_some() { + result.push(Deprecated::extradata()); + } + + result +} + +#[cfg(test)] +mod tests { + use cli::Args; + use super::{Deprecated, find_deprecated}; + + #[test] + fn test_find_deprecated() { + assert_eq!(find_deprecated(&Args::default()), vec![]); + assert_eq!(find_deprecated(&{ + let mut args = Args::default(); + args.flag_jsonrpc = true; + args.flag_rpc = true; + args.flag_jsonrpc_off = true; + args.flag_webapp = true; + args.flag_dapps_off = true; + args.flag_ipcdisable = true; + args.flag_ipc_off = true; + args.flag_etherbase = Some(Default::default()); + args.flag_extradata = Some(Default::default()); + args + }), vec![ + Deprecated::jsonrpc(), + Deprecated::rpc(), + Deprecated::jsonrpc_off(), + Deprecated::webapp(), + Deprecated::dapps_off(), + Deprecated::ipcdisable(), + Deprecated::ipc_off(), + Deprecated::etherbase(), + Deprecated::extradata(), + ]); + } +} + diff --git a/parity/die.rs b/parity/die.rs deleted file mode 100644 index 80b31f619..000000000 --- a/parity/die.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std; -use ethcore; -use ethcore::client::Error as ClientError; -use util::UtilError; -use std::process::exit; - -#[macro_export] -macro_rules! die { - ($($arg:tt)*) => (::die::die_with_message(&format!("{}", format_args!($($arg)*)))); -} - -pub fn die_with_error(module: &'static str, e: ethcore::error::Error) -> ! { - use ethcore::error::Error; - - match e { - Error::Util(UtilError::StdIo(e)) => die_with_io_error(module, e), - Error::Client(ClientError::Trace(e)) => die_with_message(&format!("{}", e)), - _ => { - trace!(target: module, "{:?}", e); - die!("{}: {}", module, e); - } - } -} - -pub fn die_with_io_error(module: &'static str, e: std::io::Error) -> ! { - trace!(target: module, "{:?}", e); - - match e.kind() { - std::io::ErrorKind::PermissionDenied => { - die!("{}: No permissions to bind to specified port.", module) - }, - std::io::ErrorKind::AddrInUse => { - die!("{}: Specified address is already in use. Please make sure that nothing is listening on the same port or try using a different one.", module) - }, - std::io::ErrorKind::AddrNotAvailable => { - die!("{}: Could not use specified interface or given address is invalid.", module) - }, - _ => die!("{}: {}", module, e), - } -} - -pub fn die_with_message(msg: &str) -> ! { - println!("ERROR: {}", msg); - exit(1); -} diff --git a/parity/dir.rs b/parity/dir.rs new file mode 100644 index 000000000..7cee88cee --- /dev/null +++ b/parity/dir.rs @@ -0,0 +1,79 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
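+
+//! Parity data directory layout. With the default data dir (`$HOME/.parity`,
+//! as produced by `Directories::default()` below), the resulting structure is:
+//!
+//! - `$HOME/.parity` - database root
+//! - `$HOME/.parity/keys` - key store
+//! - `$HOME/.parity/signer` - signer tokens
+//! - `$HOME/.parity/dapps` - local dapps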
+ +use std::fs; +use std::path::{PathBuf, Path}; +use util::{H64, H256}; +use util::journaldb::Algorithm; +use helpers::replace_home; + +// this const is irrelevent cause we do have migrations now, +// but we still use it for backwards compatibility +const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3"; + +#[derive(Debug, PartialEq)] +pub struct Directories { + pub db: String, + pub keys: String, + pub signer: String, + pub dapps: String, +} + +impl Default for Directories { + fn default() -> Self { + Directories { + db: replace_home("$HOME/.parity"), + keys: replace_home("$HOME/.parity/keys"), + signer: replace_home("$HOME/.parity/signer"), + dapps: replace_home("$HOME/.parity/dapps"), + } + } +} + +impl Directories { + pub fn create_dirs(&self) -> Result<(), String> { + try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string())); + try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string())); + try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string())); + try!(fs::create_dir_all(&self.dapps).map_err(|e| e.to_string())); + Ok(()) + } + + /// Get the path for the databases given the root path and information on the databases. + pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { + let mut dir = Path::new(&self.db).to_path_buf(); + dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default())); + dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning)); + dir + } +} + +#[cfg(test)] +mod tests { + use super::Directories; + use helpers::replace_home; + + #[test] + fn test_default_directories() { + let expected = Directories { + db: replace_home("$HOME/.parity"), + keys: replace_home("$HOME/.parity/keys"), + signer: replace_home("$HOME/.parity/signer"), + dapps: replace_home("$HOME/.parity/dapps"), + }; + assert_eq!(expected, Directories::default()); + } +} diff --git a/parity/helpers.rs b/parity/helpers.rs new file mode 100644 index 000000000..05c6e54ea --- /dev/null +++ b/parity/helpers.rs @@ -0,0 +1,391 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::{io, env}; +use std::io::{Write, Read, BufReader, BufRead}; +use std::time::Duration; +use std::path::Path; +use std::fs::File; +use util::{clean_0x, U256, Uint, Address, path, is_valid_node_url, H256}; +use util::journaldb::Algorithm; +use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig}; +use ethcore::miner::PendingSet; +use cache::CacheConfig; +use dir::Directories; +use params::Pruning; +use upgrade::upgrade; +use migration::migrate; + +pub fn to_duration(s: &str) -> Result { + to_seconds(s).map(Duration::from_secs) +} + +fn to_seconds(s: &str) -> Result { + let bad = |_| { + format!("{}: Invalid duration given. 
See parity --help for more information.", s) + }; + + match s { + "twice-daily" => Ok(12 * 60 * 60), + "half-hourly" => Ok(30 * 60), + "1second" | "1 second" | "second" => Ok(1), + "1minute" | "1 minute" | "minute" => Ok(60), + "hourly" | "1hour" | "1 hour" | "hour" => Ok(60 * 60), + "daily" | "1day" | "1 day" | "day" => Ok(24 * 60 * 60), + x if x.ends_with("seconds") => x[0..x.len() - 7].parse().map_err(bad), + x if x.ends_with("minutes") => x[0..x.len() -7].parse::().map_err(bad).map(|x| x * 60), + x if x.ends_with("hours") => x[0..x.len() - 5].parse::().map_err(bad).map(|x| x * 60 * 60), + x if x.ends_with("days") => x[0..x.len() - 4].parse::().map_err(bad).map(|x| x * 24 * 60 * 60), + x => x.parse().map_err(bad), + } +} + +pub fn to_mode(s: &str, timeout: u64, alarm: u64) -> Result { + match s { + "active" => Ok(Mode::Active), + "passive" => Ok(Mode::Passive(Duration::from_secs(timeout), Duration::from_secs(alarm))), + "dark" => Ok(Mode::Dark(Duration::from_secs(timeout))), + _ => Err(format!("{}: Invalid address for --mode. Must be one of active, passive or dark.", s)), + } +} + +pub fn to_block_id(s: &str) -> Result { + if s == "latest" { + Ok(BlockID::Latest) + } else if let Ok(num) = s.parse() { + Ok(BlockID::Number(num)) + } else if let Ok(hash) = s.parse() { + Ok(BlockID::Hash(hash)) + } else { + Err("Invalid block.".into()) + } +} + +pub fn to_u256(s: &str) -> Result { + if let Ok(decimal) = U256::from_dec_str(s) { + Ok(decimal) + } else if let Ok(hex) = clean_0x(s).parse() { + Ok(hex) + } else { + Err(format!("Invalid numeric value: {}", s)) + } +} + +pub fn to_pending_set(s: &str) -> Result { + match s { + "cheap" => Ok(PendingSet::AlwaysQueue), + "strict" => Ok(PendingSet::AlwaysSealing), + "lenient" => Ok(PendingSet::SealingOrElseQueue), + other => Err(format!("Invalid pending set value: {:?}", other)), + } +} + +pub fn to_address(s: Option) -> Result { + match s { + Some(ref a) => clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)), + None => Ok(Address::default()) + } +} + +pub fn to_addresses(s: &Option) -> Result, String> { + match *s { + Some(ref adds) if adds.is_empty() => adds.split(',') + .map(|a| clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a))) + .collect(), + _ => Ok(Vec::new()), + } +} + +/// Tries to parse string as a price. +pub fn to_price(s: &str) -> Result { + s.parse::().map_err(|_| format!("Invalid transaciton price 's' given. Must be a decimal number.")) +} + +/// Replaces `$HOME` str with home directory path. +pub fn replace_home(arg: &str) -> String { + // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` + let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()); + r.replace("/", &::std::path::MAIN_SEPARATOR.to_string() ) +} + +/// Flush output buffer. +pub fn flush_stdout() { + io::stdout().flush().expect("stdout is flushable; qed"); +} + +/// Returns default geth ipc path. +pub fn geth_ipc_path(testnet: bool) -> String { + // Windows path should not be hardcoded here. + // Instead it should be a part of path::ethereum + if cfg!(windows) { + return r"\\.\pipe\geth.ipc".to_owned(); + } + + if testnet { + path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned() + } else { + path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned() + } +} + +/// Formats and returns parity ipc path. +pub fn parity_ipc_path(s: &str) -> String { + // Windows path should not be hardcoded here. 
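+	// Instead it should be a part of path::ethereum.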
+ if cfg!(windows) { + return r"\\.\pipe\parity.jsonrpc".to_owned(); + } + + replace_home(s) +} + +/// Validates and formats bootnodes option. +pub fn to_bootnodes(bootnodes: &Option) -> Result, String> { + match *bootnodes { + Some(ref x) if !x.is_empty() => x.split(',').map(|s| { + if is_valid_node_url(s) { + Ok(s.to_owned()) + } else { + Err(format!("Invalid node address format given for a boot node: {}", s)) + } + }).collect(), + Some(_) => Ok(vec![]), + None => Ok(vec![]) + } +} + +#[cfg(test)] +pub fn default_network_config() -> ::util::NetworkConfiguration { + use util::{NetworkConfiguration, NonReservedPeerMode}; + NetworkConfiguration { + config_path: Some(replace_home("$HOME/.parity/network")), + listen_address: Some("0.0.0.0:30303".parse().unwrap()), + public_address: None, + udp_port: None, + nat_enabled: true, + discovery_enabled: true, + boot_nodes: Vec::new(), + use_secret: None, + ideal_peers: 25, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } +} + +pub fn to_client_config( + cache_config: &CacheConfig, + dirs: &Directories, + genesis_hash: H256, + mode: Mode, + tracing: Switch, + pruning: Pruning, + compaction: DatabaseCompactionProfile, + vm_type: VMType, + name: String, + fork_name: Option<&String>, + ) -> ClientConfig { + let mut client_config = ClientConfig::default(); + + let mb = 1024 * 1024; + // in bytes + client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb; + // in bytes + client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb; + // db blockchain cache size, in megabytes + client_config.blockchain.db_cache_size = Some(cache_config.db_blockchain_cache_size() as usize); + // db state cache size, in megabytes + client_config.db_cache_size = Some(cache_config.db_state_cache_size() as usize); + // db queue cache size, in bytes + client_config.queue.max_mem_use = cache_config.queue() as usize * mb; + + client_config.mode = mode; + client_config.tracing.enabled = tracing; + client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name); + client_config.db_compaction = compaction; + client_config.vm_type = vm_type; + client_config.name = name; + client_config +} + +pub fn execute_upgrades(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> Result<(), String> { + match upgrade(Some(&dirs.db)) { + Ok(upgrades_applied) if upgrades_applied > 0 => { + debug!("Executed {} upgrade scripts - ok", upgrades_applied); + }, + Err(e) => { + return Err(format!("Error upgrading parity data: {:?}", e)); + }, + _ => {}, + } + + let client_path = dirs.client_path(genesis_hash, fork_name, pruning); + migrate(&client_path, pruning).map_err(|e| format!("{}", e)) +} + +/// Prompts user asking for password. +pub fn password_prompt() -> Result { + use rpassword::read_password; + + println!("Please note that password is NOT RECOVERABLE."); + print!("Type password: "); + flush_stdout(); + + let password = read_password().unwrap(); + + print!("Repeat password: "); + flush_stdout(); + + let password_repeat = read_password().unwrap(); + + if password != password_repeat { + return Err("Passwords do not match!".into()); + } + + Ok(password) +} + +/// Read a password from password file. +pub fn password_from_file
<P>
(path: P) -> Result where P: AsRef { + let mut file = try!(File::open(path).map_err(|_| "Unable to open password file.")); + let mut file_content = String::new(); + try!(file.read_to_string(&mut file_content).map_err(|_| "Unable to read password file.")); + // remove eof + Ok((&file_content[..file_content.len() - 1]).to_owned()) +} + +/// Reads passwords from files. Treats each line as a separate password. +pub fn passwords_from_files(files: Vec) -> Result, String> { + let passwords = files.iter().map(|filename| { + let file = try!(File::open(filename).map_err(|_| format!("{} Unable to read password file. Ensure it exists and permissions are correct.", filename))); + let reader = BufReader::new(&file); + let lines = reader.lines() + .map(|l| l.unwrap()) + .collect::>(); + Ok(lines) + }).collect::>, String>>(); + Ok(try!(passwords).into_iter().flat_map(|x| x).collect()) +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + use util::{U256}; + use ethcore::client::{Mode, BlockID}; + use ethcore::miner::PendingSet; + use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_price, geth_ipc_path, to_bootnodes}; + + #[test] + fn test_to_duration() { + assert_eq!(to_duration("twice-daily").unwrap(), Duration::from_secs(12 * 60 * 60)); + assert_eq!(to_duration("half-hourly").unwrap(), Duration::from_secs(30 * 60)); + assert_eq!(to_duration("1second").unwrap(), Duration::from_secs(1)); + assert_eq!(to_duration("2seconds").unwrap(), Duration::from_secs(2)); + assert_eq!(to_duration("15seconds").unwrap(), Duration::from_secs(15)); + assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(1 * 60)); + assert_eq!(to_duration("2minutes").unwrap(), Duration::from_secs(2 * 60)); + assert_eq!(to_duration("15minutes").unwrap(), Duration::from_secs(15 * 60)); + assert_eq!(to_duration("hourly").unwrap(), Duration::from_secs(60 * 60)); + assert_eq!(to_duration("daily").unwrap(), Duration::from_secs(24 * 60 * 60)); + assert_eq!(to_duration("1hour").unwrap(), Duration::from_secs(1 * 60 * 60)); + assert_eq!(to_duration("2hours").unwrap(), Duration::from_secs(2 * 60 * 60)); + assert_eq!(to_duration("15hours").unwrap(), Duration::from_secs(15 * 60 * 60)); + assert_eq!(to_duration("1day").unwrap(), Duration::from_secs(1 * 24 * 60 * 60)); + assert_eq!(to_duration("2days").unwrap(), Duration::from_secs(2 * 24 *60 * 60)); + assert_eq!(to_duration("15days").unwrap(), Duration::from_secs(15 * 24 * 60 * 60)); + } + + #[test] + fn test_to_mode() { + assert_eq!(to_mode("active", 0, 0).unwrap(), Mode::Active); + assert_eq!(to_mode("passive", 10, 20).unwrap(), Mode::Passive(Duration::from_secs(10), Duration::from_secs(20))); + assert_eq!(to_mode("dark", 20, 30).unwrap(), Mode::Dark(Duration::from_secs(20))); + assert!(to_mode("other", 20, 30).is_err()); + } + + #[test] + fn test_to_block_id() { + assert_eq!(to_block_id("latest").unwrap(), BlockID::Latest); + assert_eq!(to_block_id("0").unwrap(), BlockID::Number(0)); + assert_eq!(to_block_id("2").unwrap(), BlockID::Number(2)); + assert_eq!(to_block_id("15").unwrap(), BlockID::Number(15)); + assert_eq!( + to_block_id("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e").unwrap(), + BlockID::Hash("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e".parse().unwrap()) + ); + } + + #[test] + fn test_to_u256() { + assert_eq!(to_u256("0").unwrap(), U256::from(0)); + assert_eq!(to_u256("11").unwrap(), U256::from(11)); + assert_eq!(to_u256("0x11").unwrap(), U256::from(17)); + assert!(to_u256("u").is_err()) + } + + 
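+	#[test]
+	fn test_to_duration_rejects_unknown() {
+		// Illustrative extra check, assuming unrecognised strings fall through to
+		// the plain `u64` parse in `to_seconds` and therefore return an error.
+		assert!(to_duration("").is_err());
+		assert!(to_duration("once-a-fortnight").is_err());
+	}
+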
#[test] + fn test_pending_set() { + assert_eq!(to_pending_set("cheap").unwrap(), PendingSet::AlwaysQueue); + assert_eq!(to_pending_set("strict").unwrap(), PendingSet::AlwaysSealing); + assert_eq!(to_pending_set("lenient").unwrap(), PendingSet::SealingOrElseQueue); + assert!(to_pending_set("othe").is_err()); + } + + #[test] + fn test_to_address() { + assert_eq!( + to_address(Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), + "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() + ); + assert_eq!( + to_address(Some("D9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), + "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() + ); + assert_eq!(to_address(None).unwrap(), Default::default()); + } + + #[test] + fn test_to_price() { + assert_eq!(to_price("1").unwrap(), 1.0); + assert_eq!(to_price("2.3").unwrap(), 2.3); + assert_eq!(to_price("2.33").unwrap(), 2.33); + } + + #[test] + #[cfg(windows)] + fn test_geth_ipc_path() { + assert_eq!(geth_ipc_path(true), r"\\.\pipe\geth.ipc".to_owned()); + assert_eq!(geth_ipc_path(false), r"\\.\pipe\geth.ipc".to_owned()); + } + + #[test] + #[cfg(not(windows))] + fn test_geth_ipc_path() { + use util::path; + assert_eq!(geth_ipc_path(true), path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned()); + assert_eq!(geth_ipc_path(false), path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned()); + } + + #[test] + fn test_to_bootnodes() { + let one_bootnode = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; + let two_bootnodes = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303,enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; + + assert_eq!(to_bootnodes(&Some("".into())), Ok(vec![])); + assert_eq!(to_bootnodes(&None), Ok(vec![])); + assert_eq!(to_bootnodes(&Some(one_bootnode.into())), Ok(vec![one_bootnode.into()])); + assert_eq!(to_bootnodes(&Some(two_bootnodes.into())), Ok(vec![one_bootnode.into(), one_bootnode.into()])); + } +} + diff --git a/parity/main.rs b/parity/main.rs index 8406c3768..50b2d9c4b 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -31,10 +31,9 @@ extern crate ethsync; #[macro_use] extern crate log as rlog; extern crate env_logger; +extern crate ethcore_logger; extern crate ctrlc; extern crate fdlimit; -#[cfg(not(windows))] -extern crate daemonize; extern crate time; extern crate number_prefix; extern crate rpassword; @@ -53,15 +52,12 @@ extern crate ansi_term; #[macro_use] extern crate lazy_static; extern crate regex; -extern crate ethcore_logger; extern crate isatty; #[cfg(feature = "dapps")] extern crate ethcore_dapps; - -#[macro_use] -mod die; +mod cache; mod upgrade; mod rpc; mod dapps; @@ -73,529 +69,56 @@ mod migration; mod signer; mod rpc_apis; mod url; +mod helpers; +mod params; +mod deprecated; +mod dir; mod modules; +mod account; +mod blockchain; +mod presale; +mod run; -use std::io::{Write, Read, BufReader, BufRead}; -use std::ops::Deref; -use std::sync::Arc; -use std::path::Path; -use std::fs::File; -use std::str::{FromStr, from_utf8}; -use std::thread::sleep; -use std::time::Duration; -use rustc_serialize::hex::FromHex; -use ctrlc::CtrlC; -use util::{H256, ToPretty, PayloadInfo, Bytes, Colour, version, journaldb, RotatingLogger}; -use 
util::panics::{MayPanic, ForwardPanic, PanicHandler}; -use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError, Mode}; -use ethcore::error::{ImportError}; -use ethcore::service::ClientService; -use ethcore::spec::Spec; -use ethsync::{NetworkConfiguration}; -use ethcore::miner::{Miner, MinerService, ExternalMiner}; -use migration::migrate; -use informant::Informant; -use util::{Mutex, Condvar}; -use ethcore_logger::setup_log; -#[cfg(feature="ipc")] -use ethcore::client::ChainNotify; - -use die::*; +use std::{process, env}; use cli::print_version; -use rpc::RpcServer; -use signer::{SignerServer, new_token}; -use dapps::WebappServer; -use io_handler::ClientIoHandler; -use configuration::{Configuration}; +use configuration::{Cmd, Configuration}; +use deprecated::find_deprecated; + +fn execute(command: Cmd) -> Result { + match command { + Cmd::Run(run_cmd) => { + try!(run::execute(run_cmd)); + Ok("".into()) + }, + Cmd::Version => Ok(print_version()), + Cmd::Account(account_cmd) => account::execute(account_cmd), + Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd), + Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd), + Cmd::SignerToken(path) => signer::new_token(path), + } +} + +fn start() -> Result { + let conf = Configuration::parse(env::args()).unwrap_or_else(|e| e.exit()); + + let deprecated = find_deprecated(&conf.args); + for d in deprecated { + println!("{}", d); + } + + let cmd = try!(conf.into_command()); + execute(cmd) +} fn main() { - let conf = Configuration::parse(); - execute(conf); -} - -fn execute(conf: Configuration) { - if conf.args.flag_version { - print_version(); - return; - } - - if conf.args.cmd_signer { - execute_signer(conf); - return; - } - - let spec = conf.spec(); - let client_config = conf.client_config(&spec); - - execute_upgrades(&conf, &spec, &client_config); - - if conf.args.cmd_daemon { - daemonize(&conf); - } - - // Setup panic handler - let panic_handler = PanicHandler::new_in_arc(); - // Setup logging - let logger = setup_log(&conf.log_settings()); - // Raise fdlimit - unsafe { ::fdlimit::raise_fd_limit(); } - - if conf.args.cmd_account { - execute_account_cli(conf); - return; - } - - if conf.args.cmd_wallet { - execute_wallet_cli(conf); - return; - } - - if conf.args.cmd_export { - execute_export(conf, panic_handler); - return; - } - - if conf.args.cmd_import { - execute_import(conf, panic_handler); - return; - } - - execute_client(conf, spec, client_config, panic_handler, logger); -} - -#[cfg(not(windows))] -fn daemonize(conf: &Configuration) { - use daemonize::Daemonize; - Daemonize::new() - .pid_file(conf.args.arg_pid_file.clone()) - .chown_pid_file(true) - .start() - .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e)); -} - -#[cfg(windows)] -fn daemonize(_conf: &Configuration) { -} - -fn execute_upgrades(conf: &Configuration, spec: &Spec, client_config: &ClientConfig) { - match ::upgrade::upgrade(Some(&conf.path())) { - Ok(upgrades_applied) if upgrades_applied > 0 => { - debug!("Executed {} upgrade scripts - ok", upgrades_applied); + match start() { + Ok(result) => { + print!("{}", result); }, - Err(e) => { - die!("Error upgrading parity data: {:?}", e); - }, - _ => {}, - } - - let db_path = get_db_path(Path::new(&conf.path()), client_config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref()); - let result = migrate(&db_path, client_config.pruning); - if let Err(err) = result { - die_with_message(&format!("{} DB path: {}", err, db_path.to_string_lossy())); - } -} 
- -fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig, panic_handler: Arc, logger: Arc) { - let mut hypervisor = modules::hypervisor(); - - info!("Starting {}", Colour::White.bold().paint(format!("{}", version()))); - info!("Using state DB journalling strategy {}", Colour::White.bold().paint(match client_config.pruning { - journaldb::Algorithm::Archive => "archive", - journaldb::Algorithm::EarlyMerge => "light", - journaldb::Algorithm::OverlayRecent => "fast", - journaldb::Algorithm::RefCounted => "basic", - })); - - // Display warning about using experimental journaldb types - match client_config.pruning { - journaldb::Algorithm::EarlyMerge | journaldb::Algorithm::RefCounted => { - warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable")); - } - _ => {} - } - - // Display warning about using unlock with signer - if conf.signer_enabled() && conf.args.flag_unlock.is_some() { - warn!("Using Trusted Signer and --unlock is not recommended!"); - warn!("NOTE that Signer will not ask you to confirm transactions from unlocked account."); - } - - let net_settings = conf.net_settings(&spec); - let sync_config = conf.sync_config(&spec); - - // Secret Store - let account_service = Arc::new(conf.account_service()); - - // Miner - let miner = Miner::new(conf.miner_options(), conf.gas_pricer(), conf.spec(), Some(account_service.clone())); - miner.set_author(conf.author().unwrap_or_default()); - miner.set_gas_floor_target(conf.gas_floor_target()); - miner.set_gas_ceil_target(conf.gas_ceil_target()); - miner.set_extra_data(conf.extra_data()); - miner.set_transactions_limit(conf.args.flag_tx_queue_size); - - // Build client - let service = ClientService::start( - client_config, - spec, - Path::new(&conf.path()), - miner.clone(), - ).unwrap_or_else(|e| die_with_error("Client", e)); - - panic_handler.forward_from(&service); - let client = service.client(); - - let external_miner = Arc::new(ExternalMiner::default()); - let network_settings = Arc::new(conf.network_settings()); - - // Sync - let (sync_provider, manage_network, chain_notify) = - modules::sync(&mut hypervisor, sync_config, NetworkConfiguration::from(net_settings), client.clone(), &conf.log_settings()) - .unwrap_or_else(|e| die_with_error("Sync", e)); - - service.add_notify(chain_notify.clone()); - - // if network is active by default - if match conf.mode() { Mode::Dark(..) 
=> false, _ => !conf.args.flag_no_network } { - chain_notify.start(); - } - - let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { - signer_port: conf.signer_port(), - signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()), - client: client.clone(), - sync: sync_provider.clone(), - net: manage_network.clone(), - secret_store: account_service.clone(), - miner: miner.clone(), - external_miner: external_miner.clone(), - logger: logger.clone(), - settings: network_settings.clone(), - allow_pending_receipt_query: !conf.args.flag_geth, - net_service: manage_network.clone(), - }); - - let dependencies = rpc::Dependencies { - panic_handler: panic_handler.clone(), - apis: deps_for_rpc_apis.clone(), - }; - - // Setup http rpc - let rpc_server = rpc::new_http(rpc::HttpConfiguration { - enabled: network_settings.rpc_enabled, - interface: conf.rpc_interface(), - port: network_settings.rpc_port, - apis: conf.rpc_apis(), - cors: conf.rpc_cors(), - hosts: conf.rpc_hosts(), - }, &dependencies); - - // setup ipc rpc - let _ipc_server = rpc::new_ipc(conf.ipc_settings(), &dependencies); - debug!("IPC: {}", conf.ipc_settings()); - - if conf.args.flag_webapp { println!("WARNING: Flag -w/--webapp is deprecated. Dapps server is now on by default. Ignoring."); } - let dapps_server = dapps::new(dapps::Configuration { - enabled: conf.dapps_enabled(), - interface: conf.dapps_interface(), - port: conf.args.flag_dapps_port, - user: conf.args.flag_dapps_user.clone(), - pass: conf.args.flag_dapps_pass.clone(), - dapps_path: conf.directories().dapps, - }, dapps::Dependencies { - panic_handler: panic_handler.clone(), - apis: deps_for_rpc_apis.clone(), - }); - - // Set up a signer - let signer_server = signer::start(signer::Configuration { - enabled: conf.signer_enabled(), - port: conf.args.flag_signer_port, - signer_path: conf.directories().signer, - }, signer::Dependencies { - panic_handler: panic_handler.clone(), - apis: deps_for_rpc_apis.clone(), - }); - - let informant = Arc::new(Informant::new(service.client(), Some(sync_provider.clone()), Some(manage_network.clone()), conf.have_color())); - service.add_notify(informant.clone()); - // Register IO handler - let io_handler = Arc::new(ClientIoHandler { - client: service.client(), - info: informant, - sync: sync_provider.clone(), - net: manage_network.clone(), - accounts: account_service.clone(), - }); - service.register_io_handler(io_handler).expect("Error registering IO handler"); - - if conf.args.cmd_ui { - if !conf.dapps_enabled() { - die_with_message("Cannot use UI command with Dapps turned off."); - } - url::open(&format!("http://{}:{}/", conf.dapps_interface(), conf.args.flag_dapps_port)); - } - - // Handle exit - wait_for_exit(panic_handler, rpc_server, dapps_server, signer_server); -} - -fn flush_stdout() { - ::std::io::stdout().flush().expect("stdout is flushable; qed"); -} - -enum DataFormat { - Hex, - Binary, -} - -fn execute_export(conf: Configuration, panic_handler: Arc) { - let spec = conf.spec(); - let client_config = conf.client_config(&spec); - - // Build client - let service = ClientService::start( - client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec())) - ).unwrap_or_else(|e| die_with_error("Client", e)); - - panic_handler.forward_from(&service); - let client = service.client(); - - // we have a client! 
- let parse_block_id = |s: &str, arg: &str| -> u64 { - if s == "latest" { - client.chain_info().best_block_number - } else if let Ok(n) = s.parse::() { - n - } else if let Ok(h) = H256::from_str(s) { - client.block_number(BlockID::Hash(h)).unwrap_or_else(|| { - die!("Unknown block hash passed to {} parameter: {:?}", arg, s); - }) - } else { - die!("Invalid {} parameter given: {:?}", arg, s); - } - }; - let from = parse_block_id(&conf.args.flag_from, "--from"); - let to = parse_block_id(&conf.args.flag_to, "--to"); - let format = match conf.args.flag_format { - Some(x) => match x.deref() { - "binary" | "bin" => DataFormat::Binary, - "hex" => DataFormat::Hex, - x => die!("Invalid --format parameter given: {:?}", x), - }, - None if conf.args.arg_file.is_none() => DataFormat::Hex, - None => DataFormat::Binary, - }; - - let mut out: Box = if let Some(f) = conf.args.arg_file { - Box::new(File::create(&f).unwrap_or_else(|_| die!("Cannot write to file given: {}", f))) - } else { - Box::new(::std::io::stdout()) - }; - - for i in from..(to + 1) { - let b = client.deref().block(BlockID::Number(i)).unwrap(); - match format { - DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); } - DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); } + Err(err) => { + print!("{}", err); + process::exit(1); } } } -fn execute_import(conf: Configuration, panic_handler: Arc) { - let spec = conf.spec(); - let client_config = conf.client_config(&spec); - - // Build client - let service = ClientService::start( - client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec())) - ).unwrap_or_else(|e| die_with_error("Client", e)); - - panic_handler.forward_from(&service); - let client = service.client(); - - let mut instream: Box = if let Some(ref f) = conf.args.arg_file { - let f = File::open(f).unwrap_or_else(|_| die!("Cannot open the file given: {}", f)); - Box::new(f) - } else { - Box::new(::std::io::stdin()) - }; - - const READAHEAD_BYTES: usize = 8; - - let mut first_bytes: Bytes = vec![0; READAHEAD_BYTES]; - let mut first_read = 0; - - let format = match conf.args.flag_format { - Some(ref x) => match x.deref() { - "binary" | "bin" => DataFormat::Binary, - "hex" => DataFormat::Hex, - x => die!("Invalid --format parameter given: {:?}", x), - }, - None => { - // autodetect... 
- first_read = instream.read(&mut(first_bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream.")); - match first_bytes[0] { - 0xf9 => { - info!("Autodetected binary data format."); - DataFormat::Binary - } - _ => { - info!("Autodetected hex data format."); - DataFormat::Hex - } - } - } - }; - - let informant = Informant::new(client.clone(), None, None, conf.have_color()); - - let do_import = |bytes| { - while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } - match client.import_block(bytes) { - Ok(_) => {} - Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!("Skipping block already in chain."); } - Err(e) => die!("Cannot import block: {:?}", e) - } - informant.tick(); - }; - - match format { - DataFormat::Binary => { - loop { - let mut bytes: Bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]}; - let n = if first_read > 0 {first_read} else {instream.read(&mut(bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream."))}; - if n == 0 { break; } - first_read = 0; - let s = PayloadInfo::from(&(bytes[..])).unwrap_or_else(|e| die!("Invalid RLP in the file/stream: {:?}", e)).total(); - bytes.resize(s, 0); - instream.read_exact(&mut(bytes[READAHEAD_BYTES..])).unwrap_or_else(|_| die!("Error reading from the file/stream.")); - do_import(bytes); - } - } - DataFormat::Hex => { - for line in BufReader::new(instream).lines() { - let s = line.unwrap_or_else(|_| die!("Error reading from the file/stream.")); - let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s}; - first_read = 0; - let bytes = FromHex::from_hex(&(s[..])).unwrap_or_else(|_| die!("Invalid hex in file/stream.")); - do_import(bytes); - } - } - } - while !client.queue_info().is_empty() { - sleep(Duration::from_secs(1)); - informant.tick(); - } - client.flush_queue(); -} - -fn execute_signer(conf: Configuration) { - if !conf.args.cmd_new_token { - die!("Unknown command."); - } - - let path = conf.directories().signer; - let code = new_token(path).unwrap_or_else(|e| { - die!("Error generating token: {:?}", e) - }); - println!("This key code will authorise your System Signer UI: {}", if conf.args.flag_no_color { code } else { format!("{}", Colour::White.bold().paint(code)) }); -} - -fn execute_account_cli(conf: Configuration) { - use ethcore::ethstore::{EthStore, import_accounts}; - use ethcore::ethstore::dir::DiskDirectory; - use ethcore::account_provider::AccountProvider; - use rpassword::read_password; - - let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap()); - let iterations = conf.keys_iterations(); - let secret_store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap())); - - if conf.args.cmd_new { - println!("Please note that password is NOT RECOVERABLE."); - print!("Type password: "); - flush_stdout(); - let password = read_password().unwrap(); - print!("Repeat password: "); - flush_stdout(); - let password_repeat = read_password().unwrap(); - if password != password_repeat { - println!("Passwords do not match!"); - return; - } - println!("New account address:"); - let new_address = secret_store.new_account(&password).unwrap(); - println!("{:?}", new_address); - return; - } - - if conf.args.cmd_list { - println!("Known addresses:"); - for addr in &secret_store.accounts() { - println!("{:?}", addr); - } - return; - } - - if conf.args.cmd_import { - let to = DiskDirectory::create(conf.keys_path()).unwrap(); - let mut imported = 0; - for path in 
&conf.args.arg_path { - let from = DiskDirectory::at(path); - imported += import_accounts(&from, &to).unwrap_or_else(|e| die!("Could not import accounts {}", e)).len(); - } - println!("Imported {} keys", imported); - } -} - -fn execute_wallet_cli(conf: Configuration) { - use ethcore::ethstore::{PresaleWallet, EthStore}; - use ethcore::ethstore::dir::DiskDirectory; - use ethcore::account_provider::AccountProvider; - - let wallet_path = conf.args.arg_path.first().unwrap(); - let filename = conf.args.flag_password.first().unwrap(); - let mut file = File::open(filename).unwrap_or_else(|_| die!("{} Unable to read password file.", filename)); - let mut file_content = String::new(); - file.read_to_string(&mut file_content).unwrap_or_else(|_| die!("{} Unable to read password file.", filename)); - - let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap()); - let iterations = conf.keys_iterations(); - let store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap())); - - // remove eof - let pass = &file_content[..file_content.len() - 1]; - let wallet = PresaleWallet::open(wallet_path).unwrap_or_else(|_| die!("Unable to open presale wallet.")); - let kp = wallet.decrypt(pass).unwrap_or_else(|_| die!("Invalid password")); - let address = store.insert_account(kp.secret().clone(), pass).unwrap(); - - println!("Imported account: {}", address); -} - -fn wait_for_exit( - panic_handler: Arc, - _rpc_server: Option, - _dapps_server: Option, - _signer_server: Option - ) { - let exit = Arc::new(Condvar::new()); - - // Handle possible exits - let e = exit.clone(); - CtrlC::set_handler(move || { e.notify_all(); }); - - // Handle panics - let e = exit.clone(); - panic_handler.on_panic(move |_reason| { e.notify_all(); }); - - // Wait for signal - let mutex = Mutex::new(()); - exit.wait(&mut mutex.lock()); - info!("Finishing work, please wait..."); -} - -/// Parity needs at least 1 test to generate coverage reports correctly. -#[test] -fn if_works() { -} diff --git a/parity/modules.rs b/parity/modules.rs index f7b14dd54..de8e68027 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -23,8 +23,7 @@ use ethsync::{SyncConfig, NetworkConfiguration}; use self::no_ipc_deps::*; #[cfg(feature="ipc")] use self::ipc_deps::*; - -use ethcore_logger::Settings as LogSettings; +use ethcore_logger::Config as LogConfig; #[cfg(not(feature="ipc"))] mod no_ipc_deps { @@ -64,7 +63,7 @@ pub fn hypervisor() -> Option { } #[cfg(feature="ipc")] -fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogSettings) -> BootArgs { +fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs { let service_config = ServiceConfiguration { sync: sync_cfg, net: net_cfg, @@ -96,7 +95,7 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, _client: Arc, - log_settings: &LogSettings, + log_settings: &LogConfig, ) -> Result { @@ -121,7 +120,7 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, client: Arc, - _log_settings: &LogSettings, + _log_settings: &LogConfig, ) -> Result { diff --git a/parity/params.rs b/parity/params.rs new file mode 100644 index 000000000..c48afa37d --- /dev/null +++ b/parity/params.rs @@ -0,0 +1,276 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::str::FromStr; +use std::time::Duration; +use util::{contents, DatabaseConfig, journaldb, H256, Address, U256, version_data}; +use util::journaldb::Algorithm; +use ethcore::spec::Spec; +use ethcore::ethereum; +use ethcore::miner::{GasPricer, GasPriceCalibratorOptions}; +use dir::Directories; + +#[derive(Debug, PartialEq)] +pub enum SpecType { + Mainnet, + Testnet, + Olympic, + Classic, + Custom(String), +} + +impl Default for SpecType { + fn default() -> Self { + SpecType::Mainnet + } +} + +impl FromStr for SpecType { + type Err = String; + + fn from_str(s: &str) -> Result { + let spec = match s { + "frontier" | "homestead" | "mainnet" => SpecType::Mainnet, + "frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic, + "morden" | "testnet" => SpecType::Testnet, + "olympic" => SpecType::Olympic, + other => SpecType::Custom(other.into()), + }; + Ok(spec) + } +} + +impl SpecType { + pub fn spec(&self) -> Result { + match *self { + SpecType::Mainnet => Ok(ethereum::new_frontier()), + SpecType::Testnet => Ok(ethereum::new_morden()), + SpecType::Olympic => Ok(ethereum::new_olympic()), + SpecType::Classic => Ok(ethereum::new_classic()), + SpecType::Custom(ref file) => Ok(Spec::load(&try!(contents(file).map_err(|_| "Could not load specification file.")))) + } + } +} + +#[derive(Debug, PartialEq)] +pub enum Pruning { + Specific(Algorithm), + Auto, +} + +impl Default for Pruning { + fn default() -> Self { + Pruning::Auto + } +} + +impl FromStr for Pruning { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "auto" => Ok(Pruning::Auto), + other => other.parse().map(Pruning::Specific), + } + } +} + +impl Pruning { + pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { + match *self { + Pruning::Specific(algo) => algo, + Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name), + } + } + + fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { + let mut algo_types = Algorithm::all_types(); + + // if all dbs have the same latest era, the last element is the default one + algo_types.push(Algorithm::default()); + + algo_types.into_iter().max_by_key(|i| { + let mut client_path = dirs.client_path(genesis_hash, fork_name, *i); + client_path.push("state"); + let db = journaldb::new(client_path.to_str().unwrap(), *i, DatabaseConfig::default()); + trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era()); + db.latest_era() + }).unwrap() + } +} + +#[derive(Debug, PartialEq)] +pub struct ResealPolicy { + pub own: bool, + pub external: bool, +} + +impl Default for ResealPolicy { + fn default() -> Self { + ResealPolicy { + own: true, + external: true, + } + } +} + +impl FromStr for ResealPolicy { + type Err = String; + + fn from_str(s: &str) -> Result { + let (own, external) = match s { + "none" => (false, false), + "own" => (true, false), + 
"ext" => (false, true), + "all" => (true, true), + x => return Err(format!("Invalid reseal value: {}", x)), + }; + + let reseal = ResealPolicy { + own: own, + external: external, + }; + + Ok(reseal) + } +} + +#[derive(Debug, PartialEq)] +pub struct AccountsConfig { + pub iterations: u32, + pub import_keys: bool, + pub testnet: bool, + pub password_files: Vec, + pub unlocked_accounts: Vec
<Address>
, +} + +impl Default for AccountsConfig { + fn default() -> Self { + AccountsConfig { + iterations: 10240, + import_keys: true, + testnet: false, + password_files: Vec::new(), + unlocked_accounts: Vec::new(), + } + } +} + +#[derive(Debug, PartialEq)] +pub enum GasPricerConfig { + Fixed(U256), + Calibrated { + usd_per_tx: f32, + recalibration_period: Duration, + } +} + +impl Default for GasPricerConfig { + fn default() -> Self { + GasPricerConfig::Calibrated { + usd_per_tx: 0.005, + recalibration_period: Duration::from_secs(3600), + } + } +} + +impl Into for GasPricerConfig { + fn into(self) -> GasPricer { + match self { + GasPricerConfig::Fixed(u) => GasPricer::Fixed(u), + GasPricerConfig::Calibrated { usd_per_tx, recalibration_period } => { + GasPricer::new_calibrated(GasPriceCalibratorOptions { + usd_per_tx: usd_per_tx, + recalibration_period: recalibration_period, + }) + } + } + } +} + +#[derive(Debug, PartialEq)] +pub struct MinerExtras { + pub author: Address, + pub extra_data: Vec, + pub gas_floor_target: U256, + pub gas_ceil_target: U256, + pub transactions_limit: usize, +} + +impl Default for MinerExtras { + fn default() -> Self { + MinerExtras { + author: Default::default(), + extra_data: version_data(), + gas_floor_target: U256::from(4_700_000), + gas_ceil_target: U256::from(6_283_184), + transactions_limit: 1024, + } + } +} + +#[cfg(test)] +mod tests { + use util::journaldb::Algorithm; + use super::{SpecType, Pruning, ResealPolicy}; + + #[test] + fn test_spec_type_parsing() { + assert_eq!(SpecType::Mainnet, "frontier".parse().unwrap()); + assert_eq!(SpecType::Mainnet, "homestead".parse().unwrap()); + assert_eq!(SpecType::Mainnet, "mainnet".parse().unwrap()); + assert_eq!(SpecType::Testnet, "testnet".parse().unwrap()); + assert_eq!(SpecType::Testnet, "morden".parse().unwrap()); + assert_eq!(SpecType::Olympic, "olympic".parse().unwrap()); + } + + #[test] + fn test_spec_type_default() { + assert_eq!(SpecType::Mainnet, SpecType::default()); + } + + #[test] + fn test_pruning_parsing() { + assert_eq!(Pruning::Auto, "auto".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::Archive), "archive".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::EarlyMerge), "light".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::OverlayRecent), "fast".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::RefCounted), "basic".parse().unwrap()); + } + + #[test] + fn test_pruning_default() { + assert_eq!(Pruning::Auto, Pruning::default()); + } + + #[test] + fn test_reseal_policy_parsing() { + let none = ResealPolicy { own: false, external: false }; + let own = ResealPolicy { own: true, external: false }; + let ext = ResealPolicy { own: false, external: true }; + let all = ResealPolicy { own: true, external: true }; + assert_eq!(none, "none".parse().unwrap()); + assert_eq!(own, "own".parse().unwrap()); + assert_eq!(ext, "ext".parse().unwrap()); + assert_eq!(all, "all".parse().unwrap()); + } + + #[test] + fn test_reseal_policy_default() { + let all = ResealPolicy { own: true, external: true }; + assert_eq!(all, ResealPolicy::default()); + } +} diff --git a/parity/presale.rs b/parity/presale.rs new file mode 100644 index 000000000..51d9cd37f --- /dev/null +++ b/parity/presale.rs @@ -0,0 +1,43 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
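// Illustrative usage sketch for the FromStr impls in params.rs above: parsing
// CLI-style strings into the typed chain parameters. The flag values shown are
// hypothetical examples; `try!` matches the error handling used in this change.
fn parse_chain_params(chain: &str, pruning: &str, reseal: &str) -> Result<(SpecType, Pruning, ResealPolicy), String> {
	let spec: SpecType = try!(chain.parse());        // e.g. "mainnet", "morden", or a custom spec file path
	let pruning: Pruning = try!(pruning.parse());    // "auto" or a journaldb algorithm name ("archive", "fast", ...)
	let reseal: ResealPolicy = try!(reseal.parse()); // "none" | "own" | "ext" | "all"
	Ok((spec, pruning, reseal))
}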
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use ethcore::ethstore::{PresaleWallet, EthStore}; +use ethcore::ethstore::dir::DiskDirectory; +use ethcore::account_provider::AccountProvider; +use helpers::{password_prompt, password_from_file}; + +#[derive(Debug, PartialEq)] +pub struct ImportWallet { + pub iterations: u32, + pub path: String, + pub wallet_path: String, + pub password_file: Option, +} + +pub fn execute(cmd: ImportWallet) -> Result { + let password: String = match cmd.password_file { + Some(file) => try!(password_from_file(file)), + None => try!(password_prompt()), + }; + + let dir = Box::new(DiskDirectory::create(cmd.path).unwrap()); + let secret_store = Box::new(EthStore::open_with_iterations(dir, cmd.iterations).unwrap()); + let acc_provider = AccountProvider::new(secret_store); + let wallet = try!(PresaleWallet::open(cmd.wallet_path).map_err(|_| "Unable to open presale wallet.")); + let kp = try!(wallet.decrypt(&password).map_err(|_| "Invalid password.")); + let address = acc_provider.insert_account(kp.secret().clone(), &password).unwrap(); + Ok(format!("{:?}", address)) +} diff --git a/parity/rpc.rs b/parity/rpc.rs index 2b0599962..b30529c1c 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -14,40 +14,64 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
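// Illustrative sketch for the ImportWallet command in presale.rs above; the
// paths and iteration count are hypothetical example values.
fn import_presale_wallet_example() -> Result<String, String> {
	let cmd = ImportWallet {
		iterations: 10240,                                    // key-derivation iterations for the new keyfile
		path: "/home/user/.parity/keys".into(),               // keystore directory to import into
		wallet_path: "/home/user/presale_wallet.json".into(), // presale wallet file to decrypt
		password_file: None,                                  // None => prompt for the password interactively
	};
	// On success the imported account address is returned as a formatted string.
	execute(cmd)
}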
- -use std::str::FromStr; +use std::fmt; use std::sync::Arc; use std::net::SocketAddr; use util::panics::PanicHandler; -use die::*; +use ethcore_rpc::{RpcServerError, RpcServer as Server}; use jsonipc; use rpc_apis; -use std::fmt; +use rpc_apis::ApiSet; +use helpers::parity_ipc_path; -pub use ethcore_rpc::Server as RpcServer; -use ethcore_rpc::{RpcServerError, RpcServer as Server}; +pub use jsonipc::Server as IpcServer; +pub use ethcore_rpc::Server as HttpServer; +#[derive(Debug, PartialEq)] pub struct HttpConfiguration { pub enabled: bool, pub interface: String, pub port: u16, - pub apis: String, + pub apis: ApiSet, pub cors: Option>, pub hosts: Option>, } +impl Default for HttpConfiguration { + fn default() -> Self { + HttpConfiguration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8545, + apis: ApiSet::UnsafeContext, + cors: None, + hosts: Some(Vec::new()), + } + } +} + +#[derive(Debug, PartialEq)] pub struct IpcConfiguration { pub enabled: bool, pub socket_addr: String, - pub apis: String, + pub apis: ApiSet, +} + +impl Default for IpcConfiguration { + fn default() -> Self { + IpcConfiguration { + enabled: true, + socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"), + apis: ApiSet::UnsafeContext, + } + } } impl fmt::Display for IpcConfiguration { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.enabled { - write!(f, "endpoint address [{}], api list [{}]", self.socket_addr, self.apis) - } - else { + write!(f, "endpoint address [{}], api list [{:?}]", self.socket_addr, self.apis) + } else { write!(f, "disabled") } } @@ -58,22 +82,19 @@ pub struct Dependencies { pub apis: Arc, } -pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Option { +pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { - return None; + return Ok(None); } - let apis = conf.apis.split(',').collect(); let url = format!("{}:{}", conf.interface, conf.port); - let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); - - Some(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, apis)) + let addr = try!(url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))); + Ok(Some(try!(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis)))) } -fn setup_rpc_server(apis: Vec<&str>, deps: &Dependencies) -> Server { - let apis = rpc_apis::from_str(apis); +fn setup_rpc_server(apis: ApiSet, deps: &Dependencies) -> Result { let server = Server::new(); - rpc_apis::setup_rpc(server, deps.apis.clone(), rpc_apis::ApiSet::List(apis)) + Ok(rpc_apis::setup_rpc(server, deps.apis.clone(), apis)) } pub fn setup_http_rpc_server( @@ -81,29 +102,28 @@ pub fn setup_http_rpc_server( url: &SocketAddr, cors_domains: Option>, allowed_hosts: Option>, - apis: Vec<&str>, -) -> RpcServer { - let server = setup_rpc_server(apis, dependencies); + apis: ApiSet +) -> Result { + let server = try!(setup_rpc_server(apis, dependencies)); let ph = dependencies.panic_handler.clone(); let start_result = server.start_http(url, cors_domains, allowed_hosts, ph); match start_result { - Err(RpcServerError::IoError(err)) => die_with_io_error("RPC", err), - Err(e) => die!("RPC: {:?}", e), - Ok(server) => server, + Err(RpcServerError::IoError(err)) => Err(format!("RPC io error: {}", err)), + Err(e) => Err(format!("RPC error: {:?}", e)), + Ok(server) => Ok(server), } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Option { - if !conf.enabled { return None; } - let 
apis = conf.apis.split(',').collect(); - Some(setup_ipc_rpc_server(deps, &conf.socket_addr, apis)) +pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { + if !conf.enabled { return Ok(None); } + Ok(Some(try!(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)))) } -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: Vec<&str>) -> jsonipc::Server { - let server = setup_rpc_server(apis, dependencies); +pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { + let server = try!(setup_rpc_server(apis, dependencies)); match server.start_ipc(addr) { - Err(jsonipc::Error::Io(io_error)) => die_with_io_error("RPC", io_error), - Err(any_error) => die!("RPC: {:?}", any_error), - Ok(server) => server + Err(jsonipc::Error::Io(io_error)) => Err(format!("RPC io error: {}", io_error)), + Err(any_error) => Err(format!("Rpc error: {:?}", any_error)), + Ok(server) => Ok(server) } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index c2146795b..66a59a86b 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -15,20 +15,21 @@ // along with Parity. If not, see . use std::collections::BTreeMap; +use std::collections::HashSet; +use std::cmp::PartialEq; use std::str::FromStr; use std::sync::Arc; - -use ethsync::{ManageNetwork, SyncProvider}; +use util::RotatingLogger; +use util::network_settings::NetworkSettings; use ethcore::miner::{Miner, ExternalMiner}; use ethcore::client::Client; -use util::RotatingLogger; use ethcore::account_provider::AccountProvider; -use util::network_settings::NetworkSettings; - +use ethsync::{ManageNetwork, SyncProvider}; +use ethcore_rpc::Extendable; pub use ethcore_rpc::ConfirmationsQueue; -use ethcore_rpc::Extendable; +#[derive(Debug, PartialEq, Clone, Eq, Hash)] pub enum Api { Web3, Net, @@ -41,18 +42,8 @@ pub enum Api { Rpc, } -pub enum ApiError { - UnknownApi(String) -} - -pub enum ApiSet { - SafeContext, - UnsafeContext, - List(Vec), -} - impl FromStr for Api { - type Err = ApiError; + type Err = String; fn from_str(s: &str) -> Result { use self::Api::*; @@ -67,11 +58,41 @@ impl FromStr for Api { "ethcore_set" => Ok(EthcoreSet), "traces" => Ok(Traces), "rpc" => Ok(Rpc), - e => Err(ApiError::UnknownApi(e.into())), + api => Err(format!("Unknown api: {}", api)) } } } +#[derive(Debug)] +pub enum ApiSet { + SafeContext, + UnsafeContext, + List(HashSet), +} + +impl Default for ApiSet { + fn default() -> Self { + ApiSet::UnsafeContext + } +} + +impl PartialEq for ApiSet { + fn eq(&self, other: &Self) -> bool { + self.list_apis() == other.list_apis() + } +} + +impl FromStr for ApiSet { + type Err = String; + + fn from_str(s: &str) -> Result { + s.split(',') + .map(Api::from_str) + .collect::>() + .map(ApiSet::List) + } +} + pub struct Dependencies { pub signer_port: Option, pub signer_queue: Arc, @@ -106,31 +127,27 @@ fn to_modules(apis: &[Api]) -> BTreeMap { modules } -pub fn from_str(apis: Vec<&str>) -> Vec { - apis.into_iter() - .map(Api::from_str) - .collect::, ApiError>>() - .unwrap_or_else(|e| match e { - ApiError::UnknownApi(s) => die!("Unknown RPC API specified: {}", s), - }) -} - -fn list_apis(apis: ApiSet) -> Vec { - match apis { - ApiSet::List(apis) => apis, - ApiSet::UnsafeContext => { - vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc] - }, - _ => { - vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc] - }, +impl ApiSet { + pub fn list_apis(&self) -> HashSet { + match 
*self { + ApiSet::List(ref apis) => apis.clone(), + ApiSet::UnsafeContext => { + vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc] + .into_iter().collect() + }, + _ => { + vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc] + .into_iter().collect() + }, + } } } pub fn setup_rpc(server: T, deps: Arc, apis: ApiSet) -> T { use ethcore_rpc::v1::*; - let apis = list_apis(apis); + // it's turned into vector, cause ont of the cases requires &[] + let apis = apis.list_apis().into_iter().collect::>(); for api in &apis { match *api { Api::Web3 => { @@ -140,8 +157,18 @@ pub fn setup_rpc(server: T, deps: Arc, apis: ApiSet server.add_delegate(NetClient::new(&deps.sync).to_delegate()); }, Api::Eth => { - server.add_delegate(EthClient::new(&deps.client, &deps.sync, &deps.secret_store, &deps.miner, &deps.external_miner, deps.allow_pending_receipt_query).to_delegate()); - server.add_delegate(EthFilterClient::new(&deps.client, &deps.miner).to_delegate()); + let client = EthClient::new( + &deps.client, + &deps.sync, + &deps.secret_store, + &deps.miner, + &deps.external_miner, + deps.allow_pending_receipt_query + ); + server.add_delegate(client.to_delegate()); + + let filter_client = EthFilterClient::new(&deps.client, &deps.miner); + server.add_delegate(filter_client.to_delegate()); if deps.signer_port.is_some() { server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate()); @@ -173,3 +200,46 @@ pub fn setup_rpc(server: T, deps: Arc, apis: ApiSet } server } + +#[cfg(test)] +mod test { + use super::{Api, ApiSet}; + + #[test] + fn test_api_parsing() { + assert_eq!(Api::Web3, "web3".parse().unwrap()); + assert_eq!(Api::Net, "net".parse().unwrap()); + assert_eq!(Api::Eth, "eth".parse().unwrap()); + assert_eq!(Api::Personal, "personal".parse().unwrap()); + assert_eq!(Api::Signer, "signer".parse().unwrap()); + assert_eq!(Api::Ethcore, "ethcore".parse().unwrap()); + assert_eq!(Api::EthcoreSet, "ethcore_set".parse().unwrap()); + assert_eq!(Api::Traces, "traces".parse().unwrap()); + assert_eq!(Api::Rpc, "rpc".parse().unwrap()); + assert!("rp".parse::().is_err()); + } + + #[test] + fn test_api_set_default() { + assert_eq!(ApiSet::UnsafeContext, ApiSet::default()); + } + + #[test] + fn test_api_set_parsing() { + assert_eq!(ApiSet::List(vec![Api::Web3, Api::Eth].into_iter().collect()), "web3,eth".parse().unwrap()); + } + + #[test] + fn test_api_set_unsafe_context() { + let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc] + .into_iter().collect(); + assert_eq!(ApiSet::UnsafeContext.list_apis(), expected); + } + + #[test] + fn test_api_set_safe_context() { + let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc] + .into_iter().collect(); + assert_eq!(ApiSet::SafeContext.list_apis(), expected); + } +} diff --git a/parity/run.rs b/parity/run.rs new file mode 100644 index 000000000..7c8262a66 --- /dev/null +++ b/parity/run.rs @@ -0,0 +1,329 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
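// Illustrative sketch for the ApiSet type in rpc_apis.rs above: parse a
// comma-separated API list (as it would arrive from a CLI flag) and expand it
// into the concrete Api collection that setup_rpc() iterates over.
fn enabled_apis(flag: &str) -> Result<Vec<Api>, String> {
	let set: ApiSet = try!(flag.parse());     // e.g. "web3,eth,net" -> ApiSet::List({Web3, Eth, Net})
	Ok(set.list_apis().into_iter().collect()) // HashSet<Api> -> Vec<Api>
}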
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::{Arc, Mutex, Condvar}; +use std::path::Path; +use ctrlc::CtrlC; +use fdlimit::raise_fd_limit; +use ethcore_logger::{Config as LogConfig, setup_log}; +use util::network_settings::NetworkSettings; +use util::{Colour, version, NetworkConfiguration, U256}; +use util::panics::{MayPanic, ForwardPanic, PanicHandler}; +use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType}; +use ethcore::service::ClientService; +use ethcore::account_provider::AccountProvider; +use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; +use ethsync::SyncConfig; +use informant::Informant; + +use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; +use signer::SignerServer; +use dapps::WebappServer; +use io_handler::ClientIoHandler; +use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras}; +use helpers::{to_client_config, execute_upgrades, passwords_from_files}; +use dir::Directories; +use cache::CacheConfig; +use dapps; +use signer; +use modules; +use rpc_apis; +use rpc; +use url; + +#[derive(Debug, PartialEq)] +pub struct RunCmd { + pub cache_config: CacheConfig, + pub dirs: Directories, + pub spec: SpecType, + pub pruning: Pruning, + /// Some if execution should be daemonized. Contains pid_file path. + pub daemon: Option, + pub logger_config: LogConfig, + pub miner_options: MinerOptions, + pub http_conf: HttpConfiguration, + pub ipc_conf: IpcConfiguration, + pub net_conf: NetworkConfiguration, + pub network_id: Option, + pub acc_conf: AccountsConfig, + pub gas_pricer: GasPricerConfig, + pub miner_extras: MinerExtras, + pub mode: Mode, + pub tracing: Switch, + pub compaction: DatabaseCompactionProfile, + pub vm_type: VMType, + pub enable_network: bool, + pub geth_compatibility: bool, + pub signer_port: Option, + pub net_settings: NetworkSettings, + pub dapps_conf: dapps::Configuration, + pub signer_conf: signer::Configuration, + pub ui: bool, + pub name: String, + pub custom_bootnodes: bool, +} + +pub fn execute(cmd: RunCmd) -> Result<(), String> { + // create supervisor + let mut hypervisor = modules::hypervisor(); + + // increase max number of open files + raise_fd_limit(); + + // set up logger + let logger = try!(setup_log(&cmd.logger_config)); + + // set up panic handler + let panic_handler = PanicHandler::new_in_arc(); + + // create dirs used by parity + try!(cmd.dirs.create_dirs()); + + // load spec + let spec = try!(cmd.spec.spec()); + let fork_name = spec.fork_name.clone(); + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref()); + + // prepare client_path + let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm); + + // execute upgrades + try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm)); + + // run in daemon mode + if let Some(pid_file) = cmd.daemon { + try!(daemonize(pid_file)); + } + + // display info about used pruning algorithm + info!("Starting {}", Colour::White.bold().paint(version())); + info!("Using state DB journalling strategy {}", 
Colour::White.bold().paint(algorithm.as_str())); + + // display warning about using experimental journaldb algorithm + if !algorithm.is_stable() { + warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable")); + } + + // create sync config + let mut sync_config = SyncConfig::default(); + sync_config.network_id = match cmd.network_id { + Some(id) => id, + None => spec.network_id(), + }; + + // prepare account provider + let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf))); + + // create miner + let miner = Miner::new(cmd.miner_options, cmd.gas_pricer.into(), spec, Some(account_provider.clone())); + miner.set_author(cmd.miner_extras.author); + miner.set_gas_floor_target(cmd.miner_extras.gas_floor_target); + miner.set_gas_ceil_target(cmd.miner_extras.gas_ceil_target); + miner.set_extra_data(cmd.miner_extras.extra_data); + miner.set_transactions_limit(cmd.miner_extras.transactions_limit); + + // create client config + let client_config = to_client_config( + &cmd.cache_config, + &cmd.dirs, + genesis_hash, + cmd.mode, + cmd.tracing, + cmd.pruning, + cmd.compaction, + cmd.vm_type, + cmd.name, + fork_name.as_ref(), + ); + + // load spec + // TODO: make it clonable and load it only once! + let spec = try!(cmd.spec.spec()); + + // set up bootnodes + let mut net_conf = cmd.net_conf; + if !cmd.custom_bootnodes { + net_conf.boot_nodes = spec.nodes.clone(); + } + + // create client + let service = try!(ClientService::start( + client_config, + spec, + Path::new(&client_path), + miner.clone(), + ).map_err(|e| format!("Client service error: {:?}", e))); + + // forward panics from service + panic_handler.forward_from(&service); + + // take handle to client + let client = service.client(); + + // create external miner + let external_miner = Arc::new(ExternalMiner::default()); + + // create sync object + let (sync_provider, manage_network, chain_notify) = try!(modules::sync( + &mut hypervisor, sync_config, net_conf.into(), client.clone(), &cmd.logger_config, + ).map_err(|e| format!("Sync error: {}", e))); + + service.add_notify(chain_notify.clone()); + + // start network + if cmd.enable_network { + chain_notify.start(); + } + + // set up dependencies for rpc servers + let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { + signer_port: cmd.signer_port, + signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()), + client: client.clone(), + sync: sync_provider.clone(), + net: manage_network.clone(), + secret_store: account_provider.clone(), + miner: miner.clone(), + external_miner: external_miner.clone(), + logger: logger.clone(), + settings: Arc::new(cmd.net_settings.clone()), + allow_pending_receipt_query: !cmd.geth_compatibility, + net_service: manage_network.clone() + }); + + let dependencies = rpc::Dependencies { + panic_handler: panic_handler.clone(), + apis: deps_for_rpc_apis.clone(), + }; + + // start rpc servers + let http_server = try!(rpc::new_http(cmd.http_conf, &dependencies)); + let ipc_server = try!(rpc::new_ipc(cmd.ipc_conf, &dependencies)); + + let dapps_deps = dapps::Dependencies { + panic_handler: panic_handler.clone(), + apis: deps_for_rpc_apis.clone(), + }; + + // start dapps server + let dapps_server = try!(dapps::new(cmd.dapps_conf.clone(), dapps_deps)); + + let signer_deps = signer::Dependencies { + panic_handler: panic_handler.clone(), + apis: deps_for_rpc_apis.clone(), + }; + + // start signer server + let signer_server = try!(signer::start(cmd.signer_conf, signer_deps)); + + let
io_handler = Arc::new(ClientIoHandler { + client: service.client(), + info: Arc::new(Informant::new(client.clone(), Some(sync_provider.clone()), Some(manage_network.clone()), cmd.logger_config.color)), + sync: sync_provider.clone(), + net: manage_network.clone(), + accounts: account_provider.clone(), + }); + service.register_io_handler(io_handler).expect("Error registering IO handler"); + + // start ui + if cmd.ui { + if !cmd.dapps_conf.enabled { + return Err("Cannot use UI command with Dapps turned off.".into()) + } + url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port)); + } + + // Handle exit + wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server); + + Ok(()) +} + +#[cfg(not(windows))] +fn daemonize(pid_file: String) -> Result<(), String> { + extern crate daemonize; + + daemonize::Daemonize::new() + .pid_file(pid_file) + .chown_pid_file(true) + .start() + .map(|_| ()) + .map_err(|e| format!("Couldn't daemonize; {}", e)) +} + +#[cfg(windows)] +fn daemonize(_pid_file: String) -> Result<(), String> { + Err("daemon is not supported on Windows".into()) +} + +fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig) -> Result<AccountProvider, String> { + use ethcore::ethstore::{import_accounts, EthStore}; + use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory}; + + let passwords = try!(passwords_from_files(cfg.password_files)); + + if cfg.import_keys { + let t = if cfg.testnet { + DirectoryType::Testnet + } else { + DirectoryType::Main + }; + + let from = GethDirectory::open(t); + let to = DiskDirectory::create(dirs.keys.clone()).unwrap(); + // ignore error, because geth may not exist + let _ = import_accounts(&from, &to); + } + + let dir = Box::new(DiskDirectory::create(dirs.keys.clone()).unwrap()); + let account_service = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, cfg.iterations).unwrap())); + + for a in cfg.unlocked_accounts { + if passwords.iter().find(|p| account_service.unlock_account_permanently(a, (*p).clone()).is_ok()).is_none() { + return Err(format!("No password given to unlock account {}.
Pass the password using `--password`.", a)); + } + } + + Ok(account_service) +} + +fn wait_for_exit( + panic_handler: Arc, + _http_server: Option, + _ipc_server: Option, + _dapps_server: Option, + _signer_server: Option + ) { + let exit = Arc::new(Condvar::new()); + + // Handle possible exits + let e = exit.clone(); + CtrlC::set_handler(move || { e.notify_all(); }); + + // Handle panics + let e = exit.clone(); + panic_handler.on_panic(move |_reason| { e.notify_all(); }); + + // Wait for signal + let mutex = Mutex::new(()); + let _ = exit.wait(mutex.lock().unwrap()); + info!("Finishing work, please wait..."); +} diff --git a/parity/signer.rs b/parity/signer.rs index 4cf9b006d..d85e6e3e3 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -22,28 +22,38 @@ use util::panics::{ForwardPanic, PanicHandler}; use util::path::restrict_permissions_owner; use rpc_apis; use ethcore_signer as signer; -use die::*; - +use helpers::replace_home; pub use ethcore_signer::Server as SignerServer; const CODES_FILENAME: &'static str = "authcodes"; +#[derive(Debug, PartialEq)] pub struct Configuration { pub enabled: bool, pub port: u16, pub signer_path: String, } +impl Default for Configuration { + fn default() -> Self { + Configuration { + enabled: true, + port: 8180, + signer_path: replace_home("$HOME/.parity/signer"), + } + } +} + pub struct Dependencies { pub panic_handler: Arc, pub apis: Arc, } -pub fn start(conf: Configuration, deps: Dependencies) -> Option { +pub fn start(conf: Configuration, deps: Dependencies) -> Result, String> { if !conf.enabled { - None + Ok(None) } else { - Some(do_start(conf, deps)) + Ok(Some(try!(do_start(conf, deps)))) } } @@ -54,7 +64,13 @@ fn codes_path(path: String) -> PathBuf { p } -pub fn new_token(path: String) -> io::Result { +pub fn new_token(path: String) -> Result { + generate_new_token(path) + .map(|code| format!("This key code will authorise your System Signer UI: {}", Colour::White.bold().paint(code))) + .map_err(|err| format!("Error generating token: {:?}", err)) +} + +fn generate_new_token(path: String) -> io::Result { let path = codes_path(path); let mut codes = try!(signer::AuthCodes::from_file(&path)); let code = try!(codes.generate_new()); @@ -63,10 +79,10 @@ pub fn new_token(path: String) -> io::Result { Ok(code) } -fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer { - let addr = format!("127.0.0.1:{}", conf.port).parse().unwrap_or_else(|_| { - die!("Invalid port specified: {}", conf.port) - }); +fn do_start(conf: Configuration, deps: Dependencies) -> Result { + let addr = try!(format!("127.0.0.1:{}", conf.port) + .parse() + .map_err(|_| format!("Invalid port specified: {}", conf.port))); let start_result = { let server = signer::ServerBuilder::new( @@ -78,11 +94,11 @@ fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer { }; match start_result { - Err(signer::ServerError::IoError(err)) => die_with_io_error("Trusted Signer", err), - Err(e) => die!("Trusted Signer: {:?}", e), + Err(signer::ServerError::IoError(err)) => Err(format!("Trusted Signer Error: {}", err)), + Err(e) => Err(format!("Trusted Signer Error: {:?}", e)), Ok(server) => { deps.panic_handler.forward_from(&server); - server + Ok(server) }, } } diff --git a/parity/sync/main.rs b/parity/sync/main.rs index 272248785..f99d3b2bc 100644 --- a/parity/sync/main.rs +++ b/parity/sync/main.rs @@ -38,8 +38,7 @@ use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration}; use std::thread; use nanoipc::IpcInterface; -use ethcore_logger::Settings as 
LogSettings; -use ethcore_logger::setup_log; +use ethcore_logger::{Config as LogConfig, setup_log}; const USAGE: &'static str = " Ethcore sync service @@ -63,18 +62,12 @@ struct Args { } impl Args { - pub fn log_settings(&self) -> LogSettings { - let mut settings = LogSettings::new(); - if self.flag_no_color || cfg!(windows) { - settings = settings.no_color(); + pub fn log_settings(&self) -> LogConfig { + LogConfig { + color: !(self.flag_no_color || cfg!(windows)), + mode: self.flag_logging.clone(), + file: self.flag_log_file.clone(), } - if let Some(ref init) = self.flag_logging { - settings = settings.init(init.to_owned()) - } - if let Some(ref file) = self.flag_log_file { - settings = settings.file(file.to_owned()) - } - settings } } @@ -97,7 +90,7 @@ fn main() { .and_then(|d| d.decode()) .unwrap_or_else(|e| e.exit()); - setup_log(&args.log_settings()); + setup_log(&args.log_settings()).expect("Log initialization failure"); let mut buffer = Vec::new(); io::stdin().read_to_end(&mut buffer).expect("Failed to read initialisation payload"); diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index e3c27f80d..766fa33d3 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -565,7 +565,7 @@ macro_rules! construct_uint { impl Uint for $name { fn from_dec_str(value: &str) -> Result { - if value.bytes().any(|b| b < 48 && b > 57) { + if !value.bytes().all(|b| b >= 48 && b <= 57) { return Err(FromDecStrErr::InvalidCharacter) } @@ -1788,6 +1788,7 @@ mod tests { assert_eq!(U256::from_dec_str("10").unwrap(), U256::from(10u64)); assert_eq!(U256::from_dec_str("1024").unwrap(), U256::from(1024u64)); assert_eq!(U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), Err(FromDecStrErr::InvalidLength)); + assert_eq!(U256::from_dec_str("0x11"), Err(FromDecStrErr::InvalidCharacter)); } #[test] diff --git a/util/fdlimit/src/lib.rs b/util/fdlimit/src/lib.rs index e659bb8c9..92c403058 100644 --- a/util/fdlimit/src/lib.rs +++ b/util/fdlimit/src/lib.rs @@ -1,18 +1,19 @@ // Copyright 2015, 2016 Ethcore (UK) Ltd. // This file is part of Parity. -// +// // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// +// // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -// +// // You should have received a copy of the GNU General Public License // along with Parity.
If not, see .extern crate libc; + extern crate libc; -pub mod raise_fd_limit; +mod raise_fd_limit; pub use raise_fd_limit::raise_fd_limit; diff --git a/util/fdlimit/src/raise_fd_limit.rs b/util/fdlimit/src/raise_fd_limit.rs index 92127da35..d0539fda9 100644 --- a/util/fdlimit/src/raise_fd_limit.rs +++ b/util/fdlimit/src/raise_fd_limit.rs @@ -15,70 +15,74 @@ /// #[cfg(any(target_os = "macos", target_os = "ios"))] #[allow(non_camel_case_types)] -pub unsafe fn raise_fd_limit() { - use libc; - use std::cmp; - use std::io; - use std::mem::size_of_val; - use std::ptr::null_mut; +pub fn raise_fd_limit() { + use libc; + use std::cmp; + use std::io; + use std::mem::size_of_val; + use std::ptr::null_mut; - static CTL_KERN: libc::c_int = 1; - static KERN_MAXFILESPERPROC: libc::c_int = 29; + unsafe { + static CTL_KERN: libc::c_int = 1; + static KERN_MAXFILESPERPROC: libc::c_int = 29; - // The strategy here is to fetch the current resource limits, read the - // kern.maxfilesperproc sysctl value, and bump the soft resource limit for - // maxfiles up to the sysctl value. + // The strategy here is to fetch the current resource limits, read the + // kern.maxfilesperproc sysctl value, and bump the soft resource limit for + // maxfiles up to the sysctl value. - // Fetch the kern.maxfilesperproc value - let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC]; - let mut maxfiles: libc::c_int = 0; - let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; - if libc::sysctl(&mut mib[0], 2, &mut maxfiles as *mut _ as *mut _, &mut size, - null_mut(), 0) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling sysctl: {}", err); - } + // Fetch the kern.maxfilesperproc value + let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC]; + let mut maxfiles: libc::c_int = 0; + let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; + if libc::sysctl(&mut mib[0], 2, &mut maxfiles as *mut _ as *mut _, &mut size, + null_mut(), 0) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling sysctl: {}", err); + } - // Fetch the current resource limits - let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; - if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling getrlimit: {}", err); - } + // Fetch the current resource limits + let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; + if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling getrlimit: {}", err); + } - // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard - // limit - rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max); + // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard + // limit + rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max); - // Set our newly-increased resource limit - if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling setrlimit: {}", err); - } + // Set our newly-increased resource limit + if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling setrlimit: {}", err); + } + } } #[cfg(any(target_os = "linux"))] #[allow(non_camel_case_types)] -pub unsafe fn raise_fd_limit() { - use libc; - use std::io; +pub fn raise_fd_limit() { + use libc; + use std::io; - // 
Fetch the current resource limits - let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; - if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling getrlimit: {}", err); - } + unsafe { + // Fetch the current resource limits + let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; + if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling getrlimit: {}", err); + } - // Set soft limit to hard imit - rlim.rlim_cur = rlim.rlim_max; + // Set soft limit to hard imit + rlim.rlim_cur = rlim.rlim_max; - // Set our newly-increased resource limit - if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling setrlimit: {}", err); - } + // Set our newly-increased resource limit + if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling setrlimit: {}", err); + } + } } #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "linux")))] -pub unsafe fn raise_fd_limit() {} +pub fn raise_fd_limit() {} diff --git a/util/src/hash.rs b/util/src/hash.rs index 1b868b87b..d43730c7a 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -67,7 +67,7 @@ pub trait FixedHash: Sized + BytesConvertable + Populatable + FromStr + Default /// Return `s` without the `0x` at the beginning of it, if any. pub fn clean_0x(s: &str) -> &str { - if s.len() >= 2 && &s[0..2] == "0x" { + if s.starts_with("0x") { &s[2..] } else { s @@ -429,13 +429,13 @@ macro_rules! impl_hash { } } - impl<'a> From<&'a str> for $from { - fn from(s: &'a str) -> $from { - use std::str::FromStr; + impl From<&'static str> for $from { + fn from(s: &'static str) -> $from { + let s = clean_0x(s); if s.len() % 2 == 1 { - $from::from_str(&("0".to_owned() + &(clean_0x(s).to_owned()))[..]).unwrap_or_else(|_| $from::new()) + $from::from_str(&("0".to_owned() + s)).unwrap() } else { - $from::from_str(clean_0x(s)).unwrap_or_else(|_| $from::new()) + $from::from_str(s).unwrap() } } } @@ -613,8 +613,6 @@ mod tests { assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef")); assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef")); assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef")); - // too short. - assert_eq!(H64::from(0), H64::from("0x34567890abcdef")); } #[test] diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index 15508c8ba..20b22c8d0 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -30,7 +30,7 @@ mod refcounteddb; pub use self::traits::JournalDB; /// A journal database algorithm. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy)] pub enum Algorithm { /// Keep all keys forever. Archive, @@ -60,14 +60,48 @@ impl Default for Algorithm { fn default() -> Algorithm { Algorithm::OverlayRecent } } +impl FromStr for Algorithm { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "archive" => Ok(Algorithm::Archive), + "light" => Ok(Algorithm::EarlyMerge), + "fast" => Ok(Algorithm::OverlayRecent), + "basic" => Ok(Algorithm::RefCounted), + e => Err(format!("Invalid algorithm: {}", e)), + } + } +} + +impl Algorithm { + /// Returns static str describing journal database algorithm. 
+ pub fn as_str(&self) -> &'static str { + match *self { + Algorithm::Archive => "archive", + Algorithm::EarlyMerge => "light", + Algorithm::OverlayRecent => "fast", + Algorithm::RefCounted => "basic", + } + } + + /// Returns true if pruning strategy is stable + pub fn is_stable(&self) -> bool { + match *self { + Algorithm::Archive | Algorithm::OverlayRecent => true, + _ => false, + } + } + + /// Returns all algorithm types. + pub fn all_types() -> Vec { + vec![Algorithm::Archive, Algorithm::EarlyMerge, Algorithm::OverlayRecent, Algorithm::RefCounted] + } +} + impl fmt::Display for Algorithm { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", match self { - &Algorithm::Archive => "archive", - &Algorithm::EarlyMerge => "earlymerge", - &Algorithm::OverlayRecent => "overlayrecent", - &Algorithm::RefCounted => "refcounted", - }) + write!(f, "{}", self.as_str()) } } @@ -85,3 +119,60 @@ pub fn new(path: &str, algorithm: Algorithm, config: DatabaseConfig) -> Box archive += 1, + Algorithm::EarlyMerge => earlymerge += 1, + Algorithm::OverlayRecent => overlayrecent += 1, + Algorithm::RefCounted => refcounted += 1, + } + } + + assert_eq!(archive, 1); + assert_eq!(earlymerge, 1); + assert_eq!(overlayrecent, 1); + assert_eq!(refcounted, 1); + } +} diff --git a/util/src/network/host.rs b/util/src/network/host.rs index e986c6020..f90e8cd57 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -51,7 +51,7 @@ const MAX_HANDSHAKES: usize = 80; const MAX_HANDSHAKES_PER_ROUND: usize = 32; const MAINTENANCE_TIMEOUT: u64 = 1000; -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] /// Network service configuration pub struct NetworkConfiguration { /// Directory path to store network configuration. None means nothing will be saved diff --git a/util/src/network/ip_utils.rs b/util/src/network/ip_utils.rs index 27ff29737..276c3fec0 100644 --- a/util/src/network/ip_utils.rs +++ b/util/src/network/ip_utils.rs @@ -16,17 +16,12 @@ // Based on original work by David Levy https://raw.githubusercontent.com/dlevy47/rust-interfaces -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use std::io; use igd::{PortMappingProtocol, search_gateway_from_timeout}; use std::time::Duration; use network::node_table::{NodeEndpoint}; -pub enum IpAddr{ - V4(Ipv4Addr), - V6(Ipv6Addr), -} - /// Socket address extension for rustc beta. To be replaces with now unstable API pub trait SocketAddrExt { /// Returns true for the special 'unspecified' address 0.0.0.0. @@ -66,8 +61,7 @@ mod getinterfaces { use std::{mem, io, ptr}; use libc::{AF_INET, AF_INET6}; use libc::{getifaddrs, freeifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6}; - use std::net::{Ipv4Addr, Ipv6Addr}; - use super::IpAddr; + use std::net::{Ipv4Addr, Ipv6Addr, IpAddr}; fn convert_sockaddr (sa: *mut sockaddr) -> Option { if sa == ptr::null_mut() { return None; } diff --git a/util/src/network_settings.rs b/util/src/network_settings.rs index 7f02272b6..7f0b18f22 100644 --- a/util/src/network_settings.rs +++ b/util/src/network_settings.rs @@ -16,7 +16,7 @@ //! 
Structure to hold network settings configured from CLI /// Networking & RPC settings -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct NetworkSettings { /// Node name pub name: String, @@ -34,3 +34,16 @@ pub struct NetworkSettings { pub rpc_port: u16, } +impl Default for NetworkSettings { + fn default() -> Self { + NetworkSettings { + name: "".into(), + chain: "homestead".into(), + max_peers: 25, + network_port: 30303, + rpc_enabled: true, + rpc_interface: "local".into(), + rpc_port: 8545 + } + } +} diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index d608863cd..7881240b5 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -63,7 +63,7 @@ impl fmt::Display for TrieError { } /// Trie types -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] pub enum TrieSpec { /// Generic trie. Generic,