diff --git a/appveyor.yml b/appveyor.yml
index 26f82122f..3ffaa961e 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -31,10 +31,10 @@ install:
build: off
test_script:
- - cargo test --verbose --release --no-default-features
+ - cargo test --verbose --release
after_test:
- - cargo build --verbose --release --no-default-features
+ - cargo build --verbose --release
- ps: if($env:cert) { Start-FileDownload $env:cert -FileName $env:keyfile }
- ps: if($env:cert) { signtool sign /f $env:keyfile /p $env:certpass target\release\parity.exe }
- makensis.exe nsis\installer.nsi
diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs
index 623fab4a6..02535fa11 100644
--- a/ethcore/src/block_queue.rs
+++ b/ethcore/src/block_queue.rs
@@ -36,7 +36,7 @@ const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512;
/// Block queue configuration
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
pub struct BlockQueueConfig {
/// Maximum number of blocks to keep in unverified queue.
/// When the limit is reached, is_full returns true.
diff --git a/ethcore/src/blockchain/config.rs b/ethcore/src/blockchain/config.rs
index e063d4269..1a0ab9d42 100644
--- a/ethcore/src/blockchain/config.rs
+++ b/ethcore/src/blockchain/config.rs
@@ -17,7 +17,7 @@
//! Blockchain configuration.
/// Blockchain configuration.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
pub struct Config {
/// Preferred cache size in bytes.
pub pref_cache_size: usize,
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index 0bb387e74..18a54de70 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -17,7 +17,7 @@
use std::collections::{HashSet, HashMap, VecDeque};
use std::ops::Deref;
use std::sync::{Arc, Weak};
-use std::path::{Path, PathBuf};
+use std::path::{Path};
use std::fmt;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
use std::time::{Instant};
@@ -141,26 +141,10 @@ pub struct Client {
}
const HISTORY: u64 = 1200;
-// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING.
-// Altering it will force a blanket DB update for *all* JournalDB-derived
-// databases.
-// Instead, add/upgrade the version string of the individual JournalDB-derived database
-// of which you actually want force an upgrade.
-const CLIENT_DB_VER_STR: &'static str = "5.3";
-
-/// Get the path for the databases given the root path and information on the databases.
-pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
- let mut dir = path.to_path_buf();
- dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
- //TODO: sec/fat: pruned/full versioning
- // version here is a bit useless now, since it's controlled only be the pruning algo.
- dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning));
- dir
-}
/// Append a path element to the given path and return the string.
-pub fn append_path(path: &Path, item: &str) -> String {
- let mut p = path.to_path_buf();
+pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
+ let mut p = path.as_ref().to_path_buf();
p.push(item);
p.to_str().unwrap().to_owned()
}
@@ -174,7 +158,7 @@ impl Client {
miner: Arc<Miner>,
message_channel: IoChannel<NetSyncMessage>,
) -> Result<Arc<Client>, ClientError> {
- let path = get_db_path(path, config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref());
+ let path = path.to_path_buf();
let gb = spec.genesis_block();
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs
index 1010ce656..4cfd04e2d 100644
--- a/ethcore/src/client/config.rs
+++ b/ethcore/src/client/config.rs
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+use std::str::FromStr;
pub use std::time::Duration;
pub use block_queue::BlockQueueConfig;
pub use blockchain::Config as BlockChainConfig;
@@ -33,7 +34,21 @@ pub enum DatabaseCompactionProfile {
}
impl Default for DatabaseCompactionProfile {
- fn default() -> Self { DatabaseCompactionProfile::Default }
+ fn default() -> Self {
+ DatabaseCompactionProfile::Default
+ }
+}
+
+impl FromStr for DatabaseCompactionProfile {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "ssd" | "default" => Ok(DatabaseCompactionProfile::Default),
+ "hdd" => Ok(DatabaseCompactionProfile::HDD),
+ _ => Err(format!("Invalid compaction profile given. Expected hdd/ssd (default).")),
+ }
+ }
}
/// Operating mode for the client.
@@ -50,11 +65,13 @@ pub enum Mode {
}
impl Default for Mode {
- fn default() -> Self { Mode::Active }
+ fn default() -> Self {
+ Mode::Active
+ }
}
/// Client configuration. Includes configs for all sub-systems.
-#[derive(Debug, Default)]
+#[derive(Debug, PartialEq, Default)]
pub struct ClientConfig {
/// Block queue configuration.
pub queue: BlockQueueConfig,
@@ -79,3 +96,25 @@ pub struct ClientConfig {
/// Type of block verifier used by client.
pub verifier_type: VerifierType,
}
+
+#[cfg(test)]
+mod test {
+ use super::{DatabaseCompactionProfile, Mode};
+
+ #[test]
+ fn test_default_compaction_profile() {
+ assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Default);
+ }
+
+ #[test]
+ fn test_parsing_compaction_profile() {
+ assert_eq!(DatabaseCompactionProfile::Default, "ssd".parse().unwrap());
+ assert_eq!(DatabaseCompactionProfile::Default, "default".parse().unwrap());
+ assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap());
+ }
+
+ #[test]
+ fn test_mode_default() {
+ assert_eq!(Mode::default(), Mode::Active);
+ }
+}
diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs
index 082b9d050..50c384a99 100644
--- a/ethcore/src/evm/factory.rs
+++ b/ethcore/src/evm/factory.rs
@@ -21,7 +21,7 @@ use std::fmt;
use evm::Evm;
use util::{U256, Uint};
-#[derive(Debug, Clone)]
+#[derive(Debug, PartialEq, Clone)]
/// Type of EVM to use.
pub enum VMType {
/// JIT EVM
diff --git a/ethcore/src/json_tests/trie.rs b/ethcore/src/json_tests/trie.rs
index 2d23ff7d2..e62fd01b3 100644
--- a/ethcore/src/json_tests/trie.rs
+++ b/ethcore/src/json_tests/trie.rs
@@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethjson;
-use util::{H256, MemoryDB, TrieMut, TrieSpec, TrieFactory};
+use util::{H256, MemoryDB, TrieSpec, TrieFactory};
fn test_trie(json: &[u8], trie: TrieSpec) -> Vec<String> {
let tests = ethjson::trie::Test::load(json).unwrap();
diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs
index 97ba6c082..79c8a95bf 100644
--- a/ethcore/src/miner/miner.rs
+++ b/ethcore/src/miner/miner.rs
@@ -36,7 +36,7 @@ use client::TransactionImportResult;
use miner::price_info::PriceInfo;
/// Different possible definitions for pending transaction set.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
pub enum PendingSet {
/// Always just the transactions in the queue. These have had only cheap checks.
AlwaysQueue,
@@ -48,7 +48,7 @@ pub enum PendingSet {
}
/// Configures the behaviour of the miner.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
pub struct MinerOptions {
/// URLs to notify when there is new work.
pub new_work_notify: Vec<String>,
@@ -77,12 +77,12 @@ impl Default for MinerOptions {
MinerOptions {
new_work_notify: vec![],
force_sealing: false,
- reseal_on_external_tx: true,
+ reseal_on_external_tx: false,
reseal_on_own_tx: true,
tx_gas_limit: !U256::zero(),
tx_queue_size: 1024,
pending_set: PendingSet::AlwaysQueue,
- reseal_min_period: Duration::from_secs(0),
+ reseal_min_period: Duration::from_secs(2),
work_queue_size: 20,
enable_resubmission: true,
}
@@ -90,6 +90,7 @@ impl Default for MinerOptions {
}
/// Options for the dynamic gas price recalibrator.
+#[derive(Debug, PartialEq)]
pub struct GasPriceCalibratorOptions {
/// Base transaction price to match against.
pub usd_per_tx: f32,
@@ -98,9 +99,9 @@ pub struct GasPriceCalibratorOptions {
}
/// The gas price validator variant for a `GasPricer`.
+#[derive(Debug, PartialEq)]
pub struct GasPriceCalibrator {
options: GasPriceCalibratorOptions,
-
next_calibration: Instant,
}
@@ -128,6 +129,7 @@ impl GasPriceCalibrator {
}
/// Struct to look after updating the acceptable gas price of a miner.
+#[derive(Debug, PartialEq)]
pub enum GasPricer {
/// A fixed gas price in terms of Wei - always the argument given.
Fixed(U256),
diff --git a/ethcore/src/trace/config.rs b/ethcore/src/trace/config.rs
index 2876baa28..1c4646817 100644
--- a/ethcore/src/trace/config.rs
+++ b/ethcore/src/trace/config.rs
@@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Traces config.
+use std::str::FromStr;
use bloomchain::Config as BloomConfig;
use trace::Error;
@@ -29,6 +30,25 @@ pub enum Switch {
Auto,
}
+impl Default for Switch {
+ fn default() -> Self {
+ Switch::Auto
+ }
+}
+
+impl FromStr for Switch {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "on" => Ok(Switch::On),
+ "off" => Ok(Switch::Off),
+ "auto" => Ok(Switch::Auto),
+ other => Err(format!("Invalid switch value: {}", other))
+ }
+ }
+}
+
impl Switch {
/// Tries to turn old switch to new value.
pub fn turn_to(&self, to: Switch) -> Result<bool, Error> {
@@ -41,7 +61,7 @@ impl Switch {
}
/// Traces config.
-#[derive(Debug, Clone)]
+#[derive(Debug, PartialEq, Clone)]
pub struct Config {
/// Indicates if tracing should be enabled or not.
/// If it's None, it will be automatically configured.
@@ -55,7 +75,7 @@ pub struct Config {
impl Default for Config {
fn default() -> Self {
Config {
- enabled: Switch::Auto,
+ enabled: Switch::default(),
blooms: BloomConfig {
levels: 3,
elements_per_index: 16,
@@ -64,3 +84,20 @@ impl Default for Config {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::Switch;
+
+ #[test]
+ fn test_switch_parsing() {
+ assert_eq!(Switch::On, "on".parse().unwrap());
+ assert_eq!(Switch::Off, "off".parse().unwrap());
+ assert_eq!(Switch::Auto, "auto".parse().unwrap());
+ }
+
+ #[test]
+ fn test_switch_default() {
+ assert_eq!(Switch::default(), Switch::Auto);
+ }
+}
diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs
index 10aee21f4..53c38a6b0 100644
--- a/ethcore/src/verification/mod.rs
+++ b/ethcore/src/verification/mod.rs
@@ -25,7 +25,7 @@ pub use self::canon_verifier::CanonVerifier;
pub use self::noop_verifier::NoopVerifier;
/// Verifier type.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
pub enum VerifierType {
/// Verifies block normally.
Canon,
diff --git a/ethstore/src/dir/disk.rs b/ethstore/src/dir/disk.rs
index b94fe5214..af6ad917c 100644
--- a/ethstore/src/dir/disk.rs
+++ b/ethstore/src/dir/disk.rs
@@ -14,16 +14,18 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-use std::{fs, ffi, io};
+use std::{fs, io};
use std::path::{PathBuf, Path};
use std::collections::HashMap;
use time;
use ethkey::Address;
-use {libc, json, SafeAccount, Error};
+use {json, SafeAccount, Error};
use super::KeyDirectory;
#[cfg(not(windows))]
fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
+ use std::ffi;
+ use libc;
let cstr = ffi::CString::new(file_path.to_str().unwrap()).unwrap();
match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } {
0 => Ok(()),
@@ -32,7 +34,7 @@ fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
}
#[cfg(windows)]
-fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
+fn restrict_permissions_to_owner(_file_path: &Path) -> Result<(), i32> {
Ok(())
}
diff --git a/logger/src/lib.rs b/logger/src/lib.rs
index 521c3a2d7..2a6c0bb35 100644
--- a/logger/src/lib.rs
+++ b/logger/src/lib.rs
@@ -36,39 +36,25 @@ use regex::Regex;
use util::RotatingLogger;
use util::log::Colour;
-pub struct Settings {
+#[derive(Debug, PartialEq)]
+pub struct Config {
+ pub mode: Option<String>,
pub color: bool,
- pub init: Option<String>,
pub file: Option<String>,
}
-impl Settings {
- pub fn new() -> Settings {
- Settings {
- color: true,
- init: None,
+impl Default for Config {
+ fn default() -> Self {
+ Config {
+ mode: None,
+ color: !cfg!(windows),
file: None,
}
}
-
- pub fn init(mut self, init: String) -> Settings {
- self.init = Some(init);
- self
- }
-
- pub fn file(mut self, file: String) -> Settings {
- self.file = Some(file);
- self
- }
-
- pub fn no_color(mut self) -> Settings {
- self.color = false;
- self
- }
}
/// Sets up the logger
-pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> {
+pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
use rlog::*;
let mut levels = String::new();
@@ -84,16 +70,21 @@ pub fn setup_log(settings: &Settings) -> Arc {
builder.parse(lvl);
}
- if let Some(ref s) = settings.init {
+ if let Some(ref s) = config.mode {
levels.push_str(s);
builder.parse(s);
}
let isatty = stderr_isatty();
- let enable_color = settings.color && isatty;
+ let enable_color = config.color && isatty;
let logs = Arc::new(RotatingLogger::new(levels));
let logger = logs.clone();
- let maybe_file = settings.file.as_ref().map(|f| File::create(f).unwrap_or_else(|_| panic!("Cannot write to log file given: {}", f)));
+
+ let maybe_file = match config.file.as_ref() {
+ Some(f) => Some(try!(File::create(f).map_err(|_| format!("Cannot write to log file given: {}", f)))),
+ None => None,
+ };
+
let format = move |record: &LogRecord| {
let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();
@@ -123,9 +114,11 @@ pub fn setup_log(settings: &Settings) -> Arc {
ret
};
+
builder.format(format);
builder.init().unwrap();
- logs
+
+ Ok(logs)
}
fn kill_color(s: &str) -> String {
diff --git a/parity/account.rs b/parity/account.rs
new file mode 100644
index 000000000..3c4a5dd74
--- /dev/null
+++ b/parity/account.rs
@@ -0,0 +1,84 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use ethcore::ethstore::{EthStore, import_accounts};
+use ethcore::ethstore::dir::DiskDirectory;
+use ethcore::account_provider::AccountProvider;
+use helpers::{password_prompt, password_from_file};
+
+#[derive(Debug, PartialEq)]
+pub enum AccountCmd {
+ New(NewAccount),
+ List(String),
+ Import(ImportAccounts),
+}
+
+#[derive(Debug, PartialEq)]
+pub struct NewAccount {
+ pub iterations: u32,
+ pub path: String,
+ pub password_file: Option<String>,
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ImportAccounts {
+ pub from: Vec<String>,
+ pub to: String,
+}
+
+pub fn execute(cmd: AccountCmd) -> Result<String, String> {
+ match cmd {
+ AccountCmd::New(new_cmd) => new(new_cmd),
+ AccountCmd::List(path) => list(path),
+ AccountCmd::Import(import_cmd) => import(import_cmd),
+ }
+}
+
+fn new(n: NewAccount) -> Result<String, String> {
+ let password: String = match n.password_file {
+ Some(file) => try!(password_from_file(file)),
+ None => try!(password_prompt()),
+ };
+
+ let dir = Box::new(DiskDirectory::create(n.path).unwrap());
+ let secret_store = Box::new(EthStore::open_with_iterations(dir, n.iterations).unwrap());
+ let acc_provider = AccountProvider::new(secret_store);
+ let new_account = acc_provider.new_account(&password).unwrap();
+ Ok(format!("{:?}", new_account))
+}
+
+fn list(path: String) -> Result<String, String> {
+ let dir = Box::new(DiskDirectory::create(path).unwrap());
+ let secret_store = Box::new(EthStore::open(dir).unwrap());
+ let acc_provider = AccountProvider::new(secret_store);
+ let accounts = acc_provider.accounts();
+ let result = accounts.into_iter()
+ .map(|a| format!("{:?}", a))
+ .collect::<Vec<String>>()
+ .join("\n");
+
+ Ok(result)
+}
+
+fn import(i: ImportAccounts) -> Result<String, String> {
+ let to = DiskDirectory::create(i.to).unwrap();
+ let mut imported = 0;
+ for path in &i.from {
+ let from = DiskDirectory::at(path);
+ imported += try!(import_accounts(&from, &to).map_err(|_| "Importing accounts failed.")).len();
+ }
+ Ok(format!("{}", imported))
+}
diff --git a/parity/blockchain.rs b/parity/blockchain.rs
new file mode 100644
index 000000000..5843b9d03
--- /dev/null
+++ b/parity/blockchain.rs
@@ -0,0 +1,284 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::str::{FromStr, from_utf8};
+use std::{io, fs};
+use std::io::{BufReader, BufRead};
+use std::time::Duration;
+use std::thread::sleep;
+use std::path::Path;
+use std::sync::Arc;
+use rustc_serialize::hex::FromHex;
+use ethcore_logger::{setup_log, Config as LogConfig};
+use util::panics::{PanicHandler, ForwardPanic};
+use util::{PayloadInfo, ToPretty};
+use ethcore::service::ClientService;
+use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
+use ethcore::error::ImportError;
+use ethcore::miner::Miner;
+use cache::CacheConfig;
+use informant::Informant;
+use params::{SpecType, Pruning};
+use helpers::{to_client_config, execute_upgrades};
+use dir::Directories;
+use fdlimit;
+
+#[derive(Debug, PartialEq)]
+pub enum DataFormat {
+ Hex,
+ Binary,
+}
+
+impl Default for DataFormat {
+ fn default() -> Self {
+ DataFormat::Binary
+ }
+}
+
+impl FromStr for DataFormat {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "binary" | "bin" => Ok(DataFormat::Binary),
+ "hex" => Ok(DataFormat::Hex),
+ x => Err(format!("Invalid format: {}", x))
+ }
+ }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BlockchainCmd {
+ Import(ImportBlockchain),
+ Export(ExportBlockchain),
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ImportBlockchain {
+ pub spec: SpecType,
+ pub logger_config: LogConfig,
+ pub cache_config: CacheConfig,
+ pub dirs: Directories,
+ pub file_path: Option<String>,
+ pub format: Option<DataFormat>,
+ pub pruning: Pruning,
+ pub compaction: DatabaseCompactionProfile,
+ pub mode: Mode,
+ pub tracing: Switch,
+ pub vm_type: VMType,
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ExportBlockchain {
+ pub spec: SpecType,
+ pub logger_config: LogConfig,
+ pub cache_config: CacheConfig,
+ pub dirs: Directories,
+ pub file_path: Option<String>,
+ pub format: Option<DataFormat>,
+ pub pruning: Pruning,
+ pub compaction: DatabaseCompactionProfile,
+ pub mode: Mode,
+ pub tracing: Switch,
+ pub from_block: BlockID,
+ pub to_block: BlockID,
+}
+
+pub fn execute(cmd: BlockchainCmd) -> Result<String, String> {
+ match cmd {
+ BlockchainCmd::Import(import_cmd) => execute_import(import_cmd),
+ BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
+ }
+}
+
+fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
+ // Setup panic handler
+ let panic_handler = PanicHandler::new_in_arc();
+
+ // load spec file
+ let spec = try!(cmd.spec.spec());
+
+ // load genesis hash
+ let genesis_hash = spec.genesis_header().hash();
+
+ // Setup logging
+ let _logger = setup_log(&cmd.logger_config);
+
+ fdlimit::raise_fd_limit();
+
+ // select pruning algorithm
+ let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+
+ // prepare client_path
+ let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
+
+ // execute upgrades
+ try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm));
+
+ // prepare client config
+ let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.vm_type, "".into(), spec.fork_name.as_ref());
+
+ // build client
+ let service = try!(ClientService::start(
+ client_config,
+ spec,
+ Path::new(&client_path),
+ Arc::new(Miner::with_spec(try!(cmd.spec.spec()))),
+ ).map_err(|e| format!("Client service error: {:?}", e)));
+
+ panic_handler.forward_from(&service);
+ let client = service.client();
+
+ let mut instream: Box<io::Read> = match cmd.file_path {
+ Some(f) => Box::new(try!(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f)))),
+ None => Box::new(io::stdin()),
+ };
+
+ const READAHEAD_BYTES: usize = 8;
+
+ let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
+ let mut first_read = 0;
+
+ let format = match cmd.format {
+ Some(format) => format,
+ None => {
+ first_read = try!(instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream."));
+ match first_bytes[0] {
+ 0xf9 => DataFormat::Binary,
+ _ => DataFormat::Hex,
+ }
+ }
+ };
+
+ let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color);
+
+ let do_import = |bytes| {
+ while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
+ match client.import_block(bytes) {
+ Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
+ trace!("Skipping block already in chain.");
+ }
+ Err(e) => {
+ return Err(format!("Cannot import block: {:?}", e));
+ },
+ Ok(_) => {},
+ }
+ informant.tick();
+ Ok(())
+ };
+
+
+ match format {
+ DataFormat::Binary => {
+ loop {
+ let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
+ let n = if first_read > 0 {
+ first_read
+ } else {
+ try!(instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream."))
+ };
+ if n == 0 { break; }
+ first_read = 0;
+ let s = try!(PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))).total();
+ bytes.resize(s, 0);
+ try!(instream.read_exact(&mut bytes[READAHEAD_BYTES..]).map_err(|_| "Error reading from the file/stream."));
+ try!(do_import(bytes));
+ }
+ }
+ DataFormat::Hex => {
+ for line in BufReader::new(instream).lines() {
+ let s = try!(line.map_err(|_| "Error reading from the file/stream."));
+ let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
+ first_read = 0;
+ let bytes = try!(s.from_hex().map_err(|_| "Invalid hex in file/stream."));
+ try!(do_import(bytes));
+ }
+ }
+ }
+ client.flush_queue();
+
+ Ok("Import completed.".into())
+}
+
+fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
+ // Setup panic handler
+ let panic_handler = PanicHandler::new_in_arc();
+
+ let format = cmd.format.unwrap_or_else(Default::default);
+
+ // load spec file
+ let spec = try!(cmd.spec.spec());
+
+ // load genesis hash
+ let genesis_hash = spec.genesis_header().hash();
+
+ // Setup logging
+ let _logger = setup_log(&cmd.logger_config);
+
+ fdlimit::raise_fd_limit();
+
+ // select pruning algorithm
+ let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+
+ // prepare client_path
+ let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
+
+ // execute upgrades
+ try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm));
+
+ // prepare client config
+ let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, VMType::default(), "".into(), spec.fork_name.as_ref());
+
+ let service = try!(ClientService::start(
+ client_config,
+ spec,
+ Path::new(&client_path),
+ Arc::new(Miner::with_spec(try!(cmd.spec.spec())))
+ ).map_err(|e| format!("Client service error: {:?}", e)));
+
+ panic_handler.forward_from(&service);
+ let client = service.client();
+
+ let mut out: Box<io::Write> = match cmd.file_path {
+ Some(f) => Box::new(try!(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f)))),
+ None => Box::new(io::stdout()),
+ };
+
+ let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found"));
+ let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found"));
+
+ for i in from..(to + 1) {
+ let b = client.block(BlockID::Number(i)).unwrap();
+ match format {
+ DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
+ DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }
+ }
+ }
+
+ Ok("Export completed.".into())
+}
+
+#[cfg(test)]
+mod test {
+ use super::DataFormat;
+
+ #[test]
+ fn test_data_format_parsing() {
+ assert_eq!(DataFormat::Binary, "binary".parse().unwrap());
+ assert_eq!(DataFormat::Binary, "bin".parse().unwrap());
+ assert_eq!(DataFormat::Hex, "hex".parse().unwrap());
+ }
+}
diff --git a/parity/cache.rs b/parity/cache.rs
new file mode 100644
index 000000000..45f1cb5f5
--- /dev/null
+++ b/parity/cache.rs
@@ -0,0 +1,109 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::cmp::max;
+
+const MIN_BC_CACHE_MB: u32 = 4;
+const MIN_DB_CACHE_MB: u32 = 2;
+const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
+const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50;
+
+/// Configuration for application cache sizes.
+/// All values are represented in MB.
+#[derive(Debug, PartialEq)]
+pub struct CacheConfig {
+ /// Size of database cache set using option `set_block_cache_size_mb`
+ /// 50% is blockchain
+ /// 25% is tracing
+ /// 25% is state
+ db: u32,
+ /// Size of blockchain cache.
+ blockchain: u32,
+ /// Size of transaction queue cache.
+ queue: u32,
+}
+
+impl Default for CacheConfig {
+ fn default() -> Self {
+ CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)
+ }
+}
+
+impl CacheConfig {
+ /// Creates new cache config with cumulative size equal `total`.
+ pub fn new_with_total_cache_size(total: u32) -> Self {
+ CacheConfig {
+ db: total * 7 / 8,
+ blockchain: total / 8,
+ queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
+ }
+ }
+
+ /// Creates new cache config with given details.
+ pub fn new(db: u32, blockchain: u32, queue: u32) -> Self {
+ CacheConfig {
+ db: db,
+ blockchain: blockchain,
+ queue: queue,
+ }
+ }
+
+ /// Size of db cache for blockchain.
+ pub fn db_blockchain_cache_size(&self) -> u32 {
+ max(MIN_DB_CACHE_MB, self.blockchain / 4)
+ }
+
+ /// Size of db cache for state.
+ pub fn db_state_cache_size(&self) -> u32 {
+ max(MIN_DB_CACHE_MB, self.db * 3 / 4)
+ }
+
+ /// Size of block queue size limit
+ pub fn queue(&self) -> u32 {
+ max(self.queue, MIN_BLOCK_QUEUE_SIZE_LIMIT_MB)
+ }
+
+ /// Size of the blockchain cache.
+ pub fn blockchain(&self) -> u32 {
+ max(self.blockchain, MIN_BC_CACHE_MB)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::CacheConfig;
+
+ #[test]
+ fn test_cache_config_constructor() {
+ let config = CacheConfig::new_with_total_cache_size(200);
+ assert_eq!(config.db, 175);
+ assert_eq!(config.blockchain(), 25);
+ assert_eq!(config.queue(), 50);
+ }
+
+ #[test]
+ fn test_cache_config_db_cache_sizes() {
+ let config = CacheConfig::new_with_total_cache_size(400);
+ assert_eq!(config.db, 350);
+ assert_eq!(config.db_blockchain_cache_size(), 12);
+ assert_eq!(config.db_state_cache_size(), 262);
+ }
+
+ #[test]
+ fn test_cache_config_default() {
+ assert_eq!(CacheConfig::default(), CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB));
+ }
+}
diff --git a/parity/cli.rs b/parity/cli.rs
index 60aca8310..11d58eb22 100644
--- a/parity/cli.rs
+++ b/parity/cli.rs
@@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::version;
+use docopt::Docopt;
pub const USAGE: &'static str = r#"
Parity. Ethereum Client.
@@ -22,6 +23,8 @@ Parity. Ethereum Client.
Copyright 2015, 2016 Ethcore (UK) Limited
Usage:
+ parity [options]
+ parity ui [options]
parity daemon [options]
parity account (new | list ) [options]
parity account import ... [options]
@@ -29,8 +32,6 @@ Usage:
parity import [ ] [options]
parity export [ ] [options]
parity signer new-token [options]
- parity [options]
- parity ui [options]
Operating Options:
--mode MODE Set the operating mode. MODE can be one of:
@@ -105,8 +106,8 @@ API and Console Options:
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC
interface. APIS is a comma-delimited list of API
name. Possible name are web3, eth, net, personal,
- ethcore, ethcore_set, traces.
- [default: web3,eth,net,ethcore,personal,traces].
+ ethcore, ethcore_set, traces, rpc.
+ [default: web3,eth,net,ethcore,personal,traces,rpc].
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it
is additional security against some attack
@@ -201,18 +202,16 @@ Footprint Options:
fast - maintain journal overlay. Fast but 50MB used.
auto - use the method most recently synced or
default to fast if none synced [default: auto].
- --cache-pref-size BYTES Specify the preferred size of the blockchain cache in
- bytes [default: 16384].
- --cache-max-size BYTES Specify the maximum size of the blockchain cache in
- bytes [default: 262144].
- --queue-max-size BYTES Specify the maximum size of memory to use for block
- queue [default: 52428800].
- --cache MEGABYTES Set total amount of discretionary memory to use for
+ --cache-size-db MB Override database cache size [default: 64].
+ --cache-size-blocks MB Specify the preferred size of the blockchain cache in
+ megabytes [default: 8].
+ --cache-size-queue MB Specify the maximum size of memory to use for block
+ queue [default: 50].
+ --cache-size MB Set total amount of discretionary memory to use for
the entire system, overrides other cache and queue
options.
Database Options:
- --db-cache-size MB Override RocksDB database cache size.
--db-compaction TYPE Database compaction type. TYPE may be one of:
ssd - suitable for SSDs and fast HDDs;
hdd - suitable for slow HDDs [default: ssd].
@@ -260,6 +259,7 @@ Legacy Options:
--basic-tx-usd.
--etherbase ADDRESS Equivalent to --author ADDRESS.
--extradata STRING Equivalent to --extra-data STRING.
+ --cache MB Equivalent to --cache-size MB.
Miscellaneous Options:
-l --logging LOGGING Specify the logging level. Must conform to the same
@@ -271,7 +271,7 @@ Miscellaneous Options:
-h --help Show this screen.
"#;
-#[derive(Debug, RustcDecodable)]
+#[derive(Debug, PartialEq, RustcDecodable)]
pub struct Args {
pub cmd_daemon: bool,
pub cmd_account: bool,
@@ -294,7 +294,6 @@ pub struct Args {
pub flag_identity: String,
pub flag_unlock: Option<String>,
pub flag_password: Vec<String>,
- pub flag_cache: Option,
pub flag_keys_path: String,
pub flag_keys_iterations: u32,
pub flag_no_import_keys: bool,
@@ -309,9 +308,13 @@ pub struct Args {
pub flag_node_key: Option<String>,
pub flag_reserved_peers: Option,
pub flag_reserved_only: bool,
- pub flag_cache_pref_size: usize,
- pub flag_cache_max_size: usize,
- pub flag_queue_max_size: usize,
+
+ pub flag_cache_size_db: u32,
+ pub flag_cache_size_blocks: u32,
+ pub flag_cache_size_queue: u32,
+ pub flag_cache_size: Option<u32>,
+ pub flag_cache: Option<u32>,
+
pub flag_no_jsonrpc: bool,
pub flag_jsonrpc_interface: String,
pub flag_jsonrpc_port: u16,
@@ -380,13 +383,18 @@ pub struct Args {
pub flag_dapps_off: bool,
pub flag_ipcpath: Option,
pub flag_ipcapi: Option,
- pub flag_db_cache_size: Option,
pub flag_db_compaction: String,
pub flag_fat_db: bool,
}
-pub fn print_version() {
- println!("\
+impl Default for Args {
+ fn default() -> Self {
+ Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap()
+ }
+}
+
+pub fn print_version() -> String {
+ format!("\
Parity
version {}
Copyright 2015, 2016 Ethcore (UK) Limited
@@ -395,6 +403,6 @@ This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
-", version());
+", version())
}
diff --git a/parity/configuration.rs b/parity/configuration.rs
index ce9b7d679..8ac7a2af0 100644
--- a/parity/configuration.rs
+++ b/parity/configuration.rs
@@ -14,57 +14,228 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see .
-use std::env;
-use std::fs::File;
use std::time::Duration;
-use std::io::{BufRead, BufReader};
-use std::net::{SocketAddr, IpAddr};
+use std::io::Read;
+use std::net::SocketAddr;
use std::path::PathBuf;
use cli::{USAGE, Args};
-use docopt::Docopt;
-
-use die::*;
-use util::*;
-use util::log::Colour::*;
-use ethcore::account_provider::AccountProvider;
+use docopt::{Docopt, Error as DocoptError};
+use util::{Hashable, NetworkConfiguration, U256, Uint, is_valid_node_url, Bytes, version_data, Secret, Address};
use util::network_settings::NetworkSettings;
-use ethcore::client::{append_path, get_db_path, Mode, ClientConfig, DatabaseCompactionProfile, Switch, VMType};
-use ethcore::miner::{MinerOptions, PendingSet, GasPricer, GasPriceCalibratorOptions};
-use ethcore::ethereum;
-use ethcore::spec::Spec;
-use ethsync::SyncConfig;
-use rpc::IpcConfiguration;
-use ethcore_logger::Settings as LogSettings;
+use util::log::Colour;
+use ethcore::client::{VMType, Mode};
+use ethcore::miner::MinerOptions;
-pub struct Configuration {
- pub args: Args
+use rpc::{IpcConfiguration, HttpConfiguration};
+use cache::CacheConfig;
+use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home,
+geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address};
+use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras};
+use ethcore_logger::Config as LogConfig;
+use dir::Directories;
+use dapps::Configuration as DappsConfiguration;
+use signer::Configuration as SignerConfiguration;
+use run::RunCmd;
+use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain};
+use presale::ImportWallet;
+use account::{AccountCmd, NewAccount, ImportAccounts};
+
+#[derive(Debug, PartialEq)]
+pub enum Cmd {
+ Run(RunCmd),
+ Version,
+ Account(AccountCmd),
+ ImportPresaleWallet(ImportWallet),
+ Blockchain(BlockchainCmd),
+ SignerToken(String),
}
-pub struct Directories {
- pub keys: String,
- pub db: String,
- pub dapps: String,
- pub signer: String,
+#[derive(Debug, PartialEq)]
+pub struct Configuration {
+ pub args: Args,
}
impl Configuration {
- pub fn parse() -> Self {
- Configuration {
- args: Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit()),
+ pub fn parse(command: I) -> Result where I: IntoIterator, S: AsRef {
+ let args = try!(Docopt::new(USAGE).and_then(|d| d.argv(command).decode()));
+
+ let config = Configuration {
+ args: args,
+ };
+
+ Ok(config)
+ }
+
+ pub fn into_command(self) -> Result {
+ let dirs = self.directories();
+ let pruning = try!(self.args.flag_pruning.parse());
+ let vm_type = try!(self.vm_type());
+ let mode = try!(to_mode(&self.args.flag_mode, self.args.flag_mode_timeout, self.args.flag_mode_alarm));
+ let miner_options = try!(self.miner_options());
+ let logger_config = self.logger_config();
+ let http_conf = try!(self.http_config());
+ let ipc_conf = try!(self.ipc_config());
+ let net_conf = try!(self.net_config());
+ let network_id = try!(self.network_id());
+ let cache_config = self.cache_config();
+ let spec = try!(self.chain().parse());
+ let tracing = try!(self.args.flag_tracing.parse());
+ let compaction = try!(self.args.flag_db_compaction.parse());
+ let enable_network = self.enable_network(&mode);
+ let geth_compatibility = self.args.flag_geth;
+ let signer_port = self.signer_port();
+ let dapps_conf = self.dapps_config();
+ let signer_conf = self.signer_config();
+
+ let cmd = if self.args.flag_version {
+ Cmd::Version
+ } else if self.args.cmd_signer {
+ Cmd::SignerToken(dirs.signer)
+ } else if self.args.cmd_account {
+ let account_cmd = if self.args.cmd_new {
+ let new_acc = NewAccount {
+ iterations: self.args.flag_keys_iterations,
+ path: dirs.keys,
+ password_file: self.args.flag_password.first().cloned(),
+ };
+ AccountCmd::New(new_acc)
+ } else if self.args.cmd_list {
+ AccountCmd::List(dirs.keys)
+ } else if self.args.cmd_import {
+ let import_acc = ImportAccounts {
+ from: self.args.arg_path.clone(),
+ to: dirs.keys,
+ };
+ AccountCmd::Import(import_acc)
+ } else {
+ unreachable!();
+ };
+ Cmd::Account(account_cmd)
+ } else if self.args.cmd_wallet {
+ let presale_cmd = ImportWallet {
+ iterations: self.args.flag_keys_iterations,
+ path: dirs.keys,
+ wallet_path: self.args.arg_path.first().unwrap().clone(),
+ password_file: self.args.flag_password.first().cloned(),
+ };
+ Cmd::ImportPresaleWallet(presale_cmd)
+ } else if self.args.cmd_import {
+ let import_cmd = ImportBlockchain {
+ spec: spec,
+ logger_config: logger_config,
+ cache_config: cache_config,
+ dirs: dirs,
+ file_path: self.args.arg_file.clone(),
+ format: None,
+ pruning: pruning,
+ compaction: compaction,
+ mode: mode,
+ tracing: tracing,
+ vm_type: vm_type,
+ };
+ Cmd::Blockchain(BlockchainCmd::Import(import_cmd))
+ } else if self.args.cmd_export {
+ let export_cmd = ExportBlockchain {
+ spec: spec,
+ logger_config: logger_config,
+ cache_config: cache_config,
+ dirs: dirs,
+ file_path: self.args.arg_file.clone(),
+ format: None,
+ pruning: pruning,
+ compaction: compaction,
+ mode: mode,
+ tracing: tracing,
+ from_block: try!(to_block_id(&self.args.flag_from)),
+ to_block: try!(to_block_id(&self.args.flag_to)),
+ };
+ Cmd::Blockchain(BlockchainCmd::Export(export_cmd))
+ } else {
+ let daemon = if self.args.cmd_daemon {
+ Some(self.args.arg_pid_file.clone())
+ } else {
+ None
+ };
+
+ let run_cmd = RunCmd {
+ cache_config: cache_config,
+ dirs: dirs,
+ spec: spec,
+ pruning: pruning,
+ daemon: daemon,
+ logger_config: logger_config,
+ miner_options: miner_options,
+ http_conf: http_conf,
+ ipc_conf: ipc_conf,
+ net_conf: net_conf,
+ network_id: network_id,
+ acc_conf: try!(self.accounts_config()),
+ gas_pricer: try!(self.gas_pricer_config()),
+ miner_extras: try!(self.miner_extras()),
+ mode: mode,
+ tracing: tracing,
+ compaction: compaction,
+ vm_type: vm_type,
+ enable_network: enable_network,
+ geth_compatibility: geth_compatibility,
+ signer_port: signer_port,
+ net_settings: self.network_settings(),
+ dapps_conf: dapps_conf,
+ signer_conf: signer_conf,
+ ui: self.args.cmd_ui,
+ name: self.args.flag_identity,
+ custom_bootnodes: self.args.flag_bootnodes.is_some(),
+ };
+ Cmd::Run(run_cmd)
+ };
+
+ Ok(cmd)
+ }
+
+ fn enable_network(&self, mode: &Mode) -> bool {
+ match *mode {
+ Mode::Dark(_) => false,
+ _ => !self.args.flag_no_network,
}
}
- pub fn mode(&self) -> Mode {
- match &(self.args.flag_mode[..]) {
- "active" => Mode::Active,
- "passive" => Mode::Passive(Duration::from_secs(self.args.flag_mode_timeout), Duration::from_secs(self.args.flag_mode_alarm)),
- "dark" => Mode::Dark(Duration::from_secs(self.args.flag_mode_timeout)),
- _ => die!("{}: Invalid address for --mode. Must be one of active, passive or dark.", self.args.flag_mode),
+ fn vm_type(&self) -> Result {
+ if self.args.flag_jitvm {
+ VMType::jit().ok_or("Parity is built without the JIT EVM.".into())
+ } else {
+ Ok(VMType::Interpreter)
}
}
- fn net_port(&self) -> u16 {
- self.args.flag_port
+ fn miner_extras(&self) -> Result {
+ let extras = MinerExtras {
+ author: try!(self.author()),
+ extra_data: try!(self.extra_data()),
+ gas_floor_target: try!(to_u256(&self.args.flag_gas_floor_target)),
+ gas_ceil_target: try!(to_u256(&self.args.flag_gas_cap)),
+ transactions_limit: self.args.flag_tx_queue_size,
+ };
+
+ Ok(extras)
+ }
+
+ fn author(&self) -> Result {
+ to_address(self.args.flag_etherbase.clone().or(self.args.flag_author.clone()))
+ }
+
+ fn cache_config(&self) -> CacheConfig {
+ match self.args.flag_cache_size.or(self.args.flag_cache) {
+ Some(size) => CacheConfig::new_with_total_cache_size(size),
+ None => CacheConfig::new(self.args.flag_cache_size_db, self.args.flag_cache_size_blocks, self.args.flag_cache_size_queue),
+ }
+ }
+
+ fn logger_config(&self) -> LogConfig {
+ LogConfig {
+ mode: self.args.flag_logging.clone(),
+ color: !self.args.flag_no_color && !cfg!(windows),
+ file: self.args.flag_log_file.clone(),
+ }
}
fn chain(&self) -> String {
@@ -79,358 +250,168 @@ impl Configuration {
self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32
}
- fn decode_u256(d: &str, argument: &str) -> U256 {
- U256::from_dec_str(d).unwrap_or_else(|_|
- U256::from_str(clean_0x(d)).unwrap_or_else(|_|
- die!("{}: Invalid numeric value for {}. Must be either a decimal or a hex number.", d, argument)
- )
- )
- }
-
fn work_notify(&self) -> Vec {
self.args.flag_notify_work.as_ref().map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect())
}
- pub fn miner_options(&self) -> MinerOptions {
- let (own, ext) = match self.args.flag_reseal_on_txs.as_str() {
- "none" => (false, false),
- "own" => (true, false),
- "ext" => (false, true),
- "all" => (true, true),
- x => die!("{}: Invalid value for --reseal option. Use --help for more information.", x)
+ fn accounts_config(&self) -> Result {
+ let cfg = AccountsConfig {
+ iterations: self.args.flag_keys_iterations,
+ import_keys: !self.args.flag_no_import_keys,
+ testnet: self.args.flag_testnet,
+ password_files: self.args.flag_password.clone(),
+ unlocked_accounts: try!(to_addresses(&self.args.flag_unlock)),
};
- MinerOptions {
+
+ Ok(cfg)
+ }
+
+ fn miner_options(&self) -> Result {
+ let reseal = try!(self.args.flag_reseal_on_txs.parse::());
+
+ let options = MinerOptions {
new_work_notify: self.work_notify(),
force_sealing: self.args.flag_force_sealing,
- reseal_on_external_tx: ext,
- reseal_on_own_tx: own,
- tx_gas_limit: self.args.flag_tx_gas_limit.as_ref().map_or(!U256::zero(), |d| Self::decode_u256(d, "--tx-gas-limit")),
- tx_queue_size: self.args.flag_tx_queue_size,
- pending_set: match self.args.flag_relay_set.as_str() {
- "cheap" => PendingSet::AlwaysQueue,
- "strict" => PendingSet::AlwaysSealing,
- "lenient" => PendingSet::SealingOrElseQueue,
- x => die!("{}: Invalid value for --relay-set option. Use --help for more information.", x)
+ reseal_on_external_tx: reseal.external,
+ reseal_on_own_tx: reseal.own,
+ tx_gas_limit: match self.args.flag_tx_gas_limit {
+ Some(ref d) => try!(to_u256(d)),
+ None => U256::max_value(),
},
+ tx_queue_size: self.args.flag_tx_queue_size,
+ pending_set: try!(to_pending_set(&self.args.flag_relay_set)),
reseal_min_period: Duration::from_millis(self.args.flag_reseal_min_period),
work_queue_size: self.args.flag_work_queue_size,
enable_resubmission: !self.args.flag_remove_solved,
- }
- }
-
- pub fn author(&self) -> Option {
- self.args.flag_etherbase.as_ref()
- .or(self.args.flag_author.as_ref())
- .map(|d| Address::from_str(clean_0x(d)).unwrap_or_else(|_| {
- die!("{}: Invalid address for --author. Must be 40 hex characters, with or without the 0x at the beginning.", d)
- }))
- }
-
- pub fn gas_floor_target(&self) -> U256 {
- let d = &self.args.flag_gas_floor_target;
- U256::from_dec_str(d).unwrap_or_else(|_| {
- die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d)
- })
- }
-
- pub fn gas_ceil_target(&self) -> U256 {
- let d = &self.args.flag_gas_cap;
- U256::from_dec_str(d).unwrap_or_else(|_| {
- die!("{}: Invalid target gas ceiling given. Must be a decimal unsigned 256-bit number.", d)
- })
- }
-
- fn to_duration(s: &str) -> Duration {
- let bad = |_| {
- die!("{}: Invalid duration given. See parity --help for more information.", s)
};
- Duration::from_secs(match s {
- "twice-daily" => 12 * 60 * 60,
- "half-hourly" => 30 * 60,
- "1second" | "1 second" | "second" => 1,
- "1minute" | "1 minute" | "minute" => 60,
- "hourly" | "1hour" | "1 hour" | "hour" => 60 * 60,
- "daily" | "1day" | "1 day" | "day" => 24 * 60 * 60,
- x if x.ends_with("seconds") => FromStr::from_str(&x[0..x.len() - 7]).unwrap_or_else(bad),
- x if x.ends_with("minutes") => FromStr::from_str(&x[0..x.len() - 7]).unwrap_or_else(bad) * 60,
- x if x.ends_with("hours") => FromStr::from_str(&x[0..x.len() - 5]).unwrap_or_else(bad) * 60 * 60,
- x if x.ends_with("days") => FromStr::from_str(&x[0..x.len() - 4]).unwrap_or_else(bad) * 24 * 60 * 60,
- x => FromStr::from_str(x).unwrap_or_else(bad),
- })
+
+ Ok(options)
}
- pub fn gas_pricer(&self) -> GasPricer {
- match self.args.flag_gasprice.as_ref() {
- Some(d) => {
- GasPricer::Fixed(U256::from_dec_str(d).unwrap_or_else(|_| {
- die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d)
- }))
- }
- _ => {
- let usd_per_tx: f32 = FromStr::from_str(&self.args.flag_usd_per_tx).unwrap_or_else(|_| {
- die!("{}: Invalid basic transaction price given in USD. Must be a decimal number.", self.args.flag_usd_per_tx)
- });
- match self.args.flag_usd_per_eth.as_str() {
- "auto" => {
- GasPricer::new_calibrated(GasPriceCalibratorOptions {
- usd_per_tx: usd_per_tx,
- recalibration_period: Self::to_duration(self.args.flag_price_update_period.as_str()),
- })
- },
- x => {
- let usd_per_eth: f32 = FromStr::from_str(x).unwrap_or_else(|_| die!("{}: Invalid ether price given in USD. Must be a decimal number.", x));
- let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
- let gas_per_tx: f32 = 21000.0;
- let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
- info!("Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)", White.bold().paint(format!("US${}", usd_per_eth)), Yellow.bold().paint(format!("{}", wei_per_gas)));
- GasPricer::Fixed(U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap())
- }
- }
- }
+ fn signer_config(&self) -> SignerConfiguration {
+ SignerConfiguration {
+ enabled: self.signer_enabled(),
+ port: self.args.flag_signer_port,
+ signer_path: self.directories().signer,
}
}
- pub fn extra_data(&self) -> Bytes {
+ fn dapps_config(&self) -> DappsConfiguration {
+ DappsConfiguration {
+ enabled: self.dapps_enabled(),
+ interface: self.dapps_interface(),
+ port: self.args.flag_dapps_port,
+ user: self.args.flag_dapps_user.clone(),
+ pass: self.args.flag_dapps_pass.clone(),
+ dapps_path: self.directories().dapps,
+ }
+ }
+
+ fn gas_pricer_config(&self) -> Result {
+ if let Some(d) = self.args.flag_gasprice.as_ref() {
+ return Ok(GasPricerConfig::Fixed(try!(to_u256(d))));
+ }
+
+ let usd_per_tx = try!(to_price(&self.args.flag_usd_per_tx));
+ if "auto" == self.args.flag_usd_per_eth.as_str() {
+ return Ok(GasPricerConfig::Calibrated {
+ usd_per_tx: usd_per_tx,
+ recalibration_period: try!(to_duration(self.args.flag_price_update_period.as_str())),
+ });
+ }
+
+ let usd_per_eth = try!(to_price(&self.args.flag_usd_per_eth));
+ let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
+ let gas_per_tx: f32 = 21000.0;
+ let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
+
+ info!(
+ "Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)",
+ Colour::White.bold().paint(format!("US${}", usd_per_eth)),
+ Colour::Yellow.bold().paint(format!("{}", wei_per_gas))
+ );
+
+ Ok(GasPricerConfig::Fixed(U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap()))
+ }
+
+ fn extra_data(&self) -> Result {
match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) {
- Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
- None => version_data(),
- Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
+ Some(ref x) if x.len() <= 32 => Ok(x.as_bytes().to_owned()),
+ None => Ok(version_data()),
+ Some(_) => Err("Extra data must be at most 32 characters".into()),
}
}
- pub fn spec(&self) -> Spec {
- match self.chain().as_str() {
- "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(),
- "frontier-dogmatic" | "homestead-dogmatic" | "classic" => ethereum::new_classic(),
- "morden" | "testnet" => ethereum::new_morden(),
- "olympic" => ethereum::new_olympic(),
- f => Spec::load(contents(f).unwrap_or_else(|_| {
- die!("{}: Couldn't read chain specification file. Sure it exists?", f)
- }).as_ref()),
- }
- }
-
- pub fn normalize_enode(e: &str) -> Option {
- if is_valid_node_url(e) {
- Some(e.to_owned())
- } else {
- None
- }
- }
-
- pub fn init_nodes(&self, spec: &Spec) -> Vec {
- match self.args.flag_bootnodes {
- Some(ref x) if !x.is_empty() => x.split(',').map(|s| {
- Self::normalize_enode(s).unwrap_or_else(|| {
- die!("{}: Invalid node address format given for a boot node.", s)
- })
- }).collect(),
- Some(_) => Vec::new(),
- None => spec.nodes().to_owned(),
- }
- }
-
- pub fn init_reserved_nodes(&self) -> Vec {
+ fn init_reserved_nodes(&self) -> Result, String> {
use std::fs::File;
- if let Some(ref path) = self.args.flag_reserved_peers {
- let mut buffer = String::new();
- let mut node_file = File::open(path).unwrap_or_else(|e| {
- die!("Error opening reserved nodes file: {}", e);
- });
- node_file.read_to_string(&mut buffer).expect("Error reading reserved node file");
- buffer.lines().map(|s| {
- Self::normalize_enode(s).unwrap_or_else(|| {
- die!("{}: Invalid node address format given for a reserved node.", s);
- })
- }).collect()
- } else {
- Vec::new()
+ match self.args.flag_reserved_peers {
+ Some(ref path) => {
+ let mut buffer = String::new();
+ let mut node_file = try!(File::open(path).map_err(|e| format!("Error opening reserved nodes file: {}", e)));
+ try!(node_file.read_to_string(&mut buffer).map_err(|_| "Error reading reserved node file"));
+ if let Some(invalid) = buffer.lines().find(|s| !is_valid_node_url(s)) {
+ Err(format!("Invalid node address format given for a reserved node: {}", invalid))
+ } else {
+ Ok(buffer.lines().map(|s| s.to_owned()).collect())
+ }
+ },
+ None => Ok(Vec::new())
}
}
- pub fn net_addresses(&self) -> (Option, Option) {
- let port = self.net_port();
- let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), port));
+ fn net_addresses(&self) -> Result<(Option, Option), String> {
+ let port = self.args.flag_port;
+ let listen_address = Some(SocketAddr::new("0.0.0.0".parse().unwrap(), port));
let public_address = if self.args.flag_nat.starts_with("extip:") {
let host = &self.args.flag_nat[6..];
- let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host));
+ let host = try!(host.parse().map_err(|_| format!("Invalid host given with `--nat extip:{}`", host)));
Some(SocketAddr::new(host, port))
} else {
None
};
- (listen_address, public_address)
+ Ok((listen_address, public_address))
}
- pub fn net_settings(&self, spec: &Spec) -> NetworkConfiguration {
+ fn net_config(&self) -> Result {
let mut ret = NetworkConfiguration::new();
ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp";
- ret.boot_nodes = self.init_nodes(spec);
- let (listen, public) = self.net_addresses();
+ ret.boot_nodes = try!(to_bootnodes(&self.args.flag_bootnodes));
+ let (listen, public) = try!(self.net_addresses());
ret.listen_address = listen;
ret.public_address = public;
- ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(s).unwrap_or_else(|_| s.sha3()));
+ ret.use_secret = self.args.flag_node_key.as_ref().map(|s| s.parse::().unwrap_or_else(|_| s.sha3()));
ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover;
ret.ideal_peers = self.max_peers();
- let mut net_path = PathBuf::from(&self.path());
+ let mut net_path = PathBuf::from(self.directories().db);
net_path.push("network");
ret.config_path = Some(net_path.to_str().unwrap().to_owned());
- ret.reserved_nodes = self.init_reserved_nodes();
+ ret.reserved_nodes = try!(self.init_reserved_nodes());
if self.args.flag_reserved_only {
ret.non_reserved_mode = ::util::network::NonReservedPeerMode::Deny;
}
- ret
+ Ok(ret)
}
- fn find_best_db(&self, spec: &Spec) -> Option {
- let mut ret = None;
- let mut latest_era = None;
- let jdb_types = [journaldb::Algorithm::Archive, journaldb::Algorithm::EarlyMerge, journaldb::Algorithm::OverlayRecent, journaldb::Algorithm::RefCounted];
- for i in jdb_types.into_iter() {
- let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash(), spec.fork_name.as_ref()), "state"), *i, kvdb::DatabaseConfig::default());
- trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
- match (latest_era, db.latest_era()) {
- (Some(best), Some(this)) if best >= this => {}
- (_, None) => {}
- (_, Some(this)) => {
- latest_era = Some(this);
- ret = Some(*i);
- }
- }
- }
- ret
- }
-
- pub fn pruning_algorithm(&self, spec: &Spec) -> journaldb::Algorithm {
- match self.args.flag_pruning.as_str() {
- "archive" => journaldb::Algorithm::Archive,
- "light" => journaldb::Algorithm::EarlyMerge,
- "fast" => journaldb::Algorithm::OverlayRecent,
- "basic" => journaldb::Algorithm::RefCounted,
- "auto" => self.find_best_db(spec).unwrap_or(journaldb::Algorithm::OverlayRecent),
- _ => { die!("Invalid pruning method given."); }
+ fn network_id(&self) -> Result