Reformat the source code

Artem Vorotnikov 2020-08-05 07:08:03 +03:00
parent 253ff3f37b
commit 610d9baba4
742 changed files with 175791 additions and 141379 deletions
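The formatting changes below (merged and alphabetized imports, four-space indentation, trailing commas, expanded where-clauses) match rustfmt's default style; a workspace-wide sweep like this is typically produced by running cargo fmt --all. That is an assumption, since the commit itself does not name the tool.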

View File

@@ -26,11 +26,13 @@ extern crate threadpool;
#[macro_use]
extern crate serde_derive;
use std::num::ParseIntError;
use std::{env, fmt, process, io, sync};
use std::{env, fmt, io, num::ParseIntError, process, sync};
use docopt::Docopt;
use ethkey::{KeyPair, Random, Brain, BrainPrefix, Prefix, Error as EthkeyError, Generator, sign, verify_public, verify_address, brain_recover};
use ethkey::{
brain_recover, sign, verify_address, verify_public, Brain, BrainPrefix, Error as EthkeyError,
Generator, KeyPair, Prefix, Random,
};
use rustc_hex::{FromHex, FromHexError};
const USAGE: &'static str = r#"
@@ -65,387 +67,428 @@ Commands:
#[derive(Debug, Deserialize)]
struct Args {
cmd_info: bool,
cmd_generate: bool,
cmd_random: bool,
cmd_prefix: bool,
cmd_sign: bool,
cmd_verify: bool,
cmd_public: bool,
cmd_address: bool,
cmd_recover: bool,
arg_prefix: String,
arg_secret: String,
arg_secret_or_phrase: String,
arg_known_phrase: String,
arg_message: String,
arg_public: String,
arg_address: String,
arg_signature: String,
flag_secret: bool,
flag_public: bool,
flag_address: bool,
flag_brain: bool,
cmd_info: bool,
cmd_generate: bool,
cmd_random: bool,
cmd_prefix: bool,
cmd_sign: bool,
cmd_verify: bool,
cmd_public: bool,
cmd_address: bool,
cmd_recover: bool,
arg_prefix: String,
arg_secret: String,
arg_secret_or_phrase: String,
arg_known_phrase: String,
arg_message: String,
arg_public: String,
arg_address: String,
arg_signature: String,
flag_secret: bool,
flag_public: bool,
flag_address: bool,
flag_brain: bool,
}
#[derive(Debug)]
enum Error {
Ethkey(EthkeyError),
FromHex(FromHexError),
ParseInt(ParseIntError),
Docopt(docopt::Error),
Io(io::Error),
Ethkey(EthkeyError),
FromHex(FromHexError),
ParseInt(ParseIntError),
Docopt(docopt::Error),
Io(io::Error),
}
impl From<EthkeyError> for Error {
fn from(err: EthkeyError) -> Self {
Error::Ethkey(err)
}
fn from(err: EthkeyError) -> Self {
Error::Ethkey(err)
}
}
impl From<FromHexError> for Error {
fn from(err: FromHexError) -> Self {
Error::FromHex(err)
}
fn from(err: FromHexError) -> Self {
Error::FromHex(err)
}
}
impl From<ParseIntError> for Error {
fn from(err: ParseIntError) -> Self {
Error::ParseInt(err)
}
fn from(err: ParseIntError) -> Self {
Error::ParseInt(err)
}
}
impl From<docopt::Error> for Error {
fn from(err: docopt::Error) -> Self {
Error::Docopt(err)
}
fn from(err: docopt::Error) -> Self {
Error::Docopt(err)
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::Io(err)
}
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::Ethkey(ref e) => write!(f, "{}", e),
Error::FromHex(ref e) => write!(f, "{}", e),
Error::ParseInt(ref e) => write!(f, "{}", e),
Error::Docopt(ref e) => write!(f, "{}", e),
Error::Io(ref e) => write!(f, "{}", e),
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::Ethkey(ref e) => write!(f, "{}", e),
Error::FromHex(ref e) => write!(f, "{}", e),
Error::ParseInt(ref e) => write!(f, "{}", e),
Error::Docopt(ref e) => write!(f, "{}", e),
Error::Io(ref e) => write!(f, "{}", e),
}
}
}
enum DisplayMode {
KeyPair,
Secret,
Public,
Address,
KeyPair,
Secret,
Public,
Address,
}
impl DisplayMode {
fn new(args: &Args) -> Self {
if args.flag_secret {
DisplayMode::Secret
} else if args.flag_public {
DisplayMode::Public
} else if args.flag_address {
DisplayMode::Address
} else {
DisplayMode::KeyPair
}
}
fn new(args: &Args) -> Self {
if args.flag_secret {
DisplayMode::Secret
} else if args.flag_public {
DisplayMode::Public
} else if args.flag_address {
DisplayMode::Address
} else {
DisplayMode::KeyPair
}
}
}
fn main() {
panic_hook::set_abort();
env_logger::try_init().expect("Logger initialized only once.");
panic_hook::set_abort();
env_logger::try_init().expect("Logger initialized only once.");
match execute(env::args()) {
Ok(ok) => println!("{}", ok),
Err(Error::Docopt(ref e)) => e.exit(),
Err(err) => {
eprintln!("{}", err);
process::exit(1);
}
}
match execute(env::args()) {
Ok(ok) => println!("{}", ok),
Err(Error::Docopt(ref e)) => e.exit(),
Err(err) => {
eprintln!("{}", err);
process::exit(1);
}
}
}
fn display(result: (KeyPair, Option<String>), mode: DisplayMode) -> String {
let keypair = result.0;
match mode {
DisplayMode::KeyPair => match result.1 {
Some(extra_data) => format!("{}\n{}", extra_data, keypair),
None => format!("{}", keypair)
},
DisplayMode::Secret => format!("{:x}", keypair.secret()),
DisplayMode::Public => format!("{:x}", keypair.public()),
DisplayMode::Address => format!("{:x}", keypair.address()),
}
let keypair = result.0;
match mode {
DisplayMode::KeyPair => match result.1 {
Some(extra_data) => format!("{}\n{}", extra_data, keypair),
None => format!("{}", keypair),
},
DisplayMode::Secret => format!("{:x}", keypair.secret()),
DisplayMode::Public => format!("{:x}", keypair.public()),
DisplayMode::Address => format!("{:x}", keypair.address()),
}
}
fn execute<S, I>(command: I) -> Result<String, Error> where I: IntoIterator<Item=S>, S: AsRef<str> {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.argv(command).deserialize())?;
fn execute<S, I>(command: I) -> Result<String, Error>
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
let args: Args = Docopt::new(USAGE).and_then(|d| d.argv(command).deserialize())?;
return if args.cmd_info {
let display_mode = DisplayMode::new(&args);
return if args.cmd_info {
let display_mode = DisplayMode::new(&args);
let result = if args.flag_brain {
let phrase = args.arg_secret_or_phrase;
let phrase_info = validate_phrase(&phrase);
let keypair = Brain::new(phrase).generate().expect("Brain wallet generator is infallible; qed");
(keypair, Some(phrase_info))
} else {
let secret = args.arg_secret_or_phrase.parse().map_err(|_| EthkeyError::InvalidSecret)?;
(KeyPair::from_secret(secret)?, None)
};
Ok(display(result, display_mode))
} else if args.cmd_generate {
let display_mode = DisplayMode::new(&args);
let result = if args.cmd_random {
if args.flag_brain {
let mut brain = BrainPrefix::new(vec![0], usize::max_value(), BRAIN_WORDS);
let keypair = brain.generate()?;
let phrase = format!("recovery phrase: {}", brain.phrase());
(keypair, Some(phrase))
} else {
(Random.generate()?, None)
}
} else if args.cmd_prefix {
let prefix = args.arg_prefix.from_hex()?;
let brain = args.flag_brain;
in_threads(move || {
let iterations = 1024;
let prefix = prefix.clone();
move || {
let prefix = prefix.clone();
let res = if brain {
let mut brain = BrainPrefix::new(prefix, iterations, BRAIN_WORDS);
let result = brain.generate();
let phrase = format!("recovery phrase: {}", brain.phrase());
result.map(|keypair| (keypair, Some(phrase)))
} else {
let result = Prefix::new(prefix, iterations).generate();
result.map(|res| (res, None))
};
let result = if args.flag_brain {
let phrase = args.arg_secret_or_phrase;
let phrase_info = validate_phrase(&phrase);
let keypair = Brain::new(phrase)
.generate()
.expect("Brain wallet generator is infallible; qed");
(keypair, Some(phrase_info))
} else {
let secret = args
.arg_secret_or_phrase
.parse()
.map_err(|_| EthkeyError::InvalidSecret)?;
(KeyPair::from_secret(secret)?, None)
};
Ok(display(result, display_mode))
} else if args.cmd_generate {
let display_mode = DisplayMode::new(&args);
let result = if args.cmd_random {
if args.flag_brain {
let mut brain = BrainPrefix::new(vec![0], usize::max_value(), BRAIN_WORDS);
let keypair = brain.generate()?;
let phrase = format!("recovery phrase: {}", brain.phrase());
(keypair, Some(phrase))
} else {
(Random.generate()?, None)
}
} else if args.cmd_prefix {
let prefix = args.arg_prefix.from_hex()?;
let brain = args.flag_brain;
in_threads(move || {
let iterations = 1024;
let prefix = prefix.clone();
move || {
let prefix = prefix.clone();
let res = if brain {
let mut brain = BrainPrefix::new(prefix, iterations, BRAIN_WORDS);
let result = brain.generate();
let phrase = format!("recovery phrase: {}", brain.phrase());
result.map(|keypair| (keypair, Some(phrase)))
} else {
let result = Prefix::new(prefix, iterations).generate();
result.map(|res| (res, None))
};
Ok(res.map(Some).unwrap_or(None))
}
})?
} else {
return Ok(format!("{}", USAGE))
};
Ok(display(result, display_mode))
} else if args.cmd_sign {
let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?;
let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?;
let signature = sign(&secret, &message)?;
Ok(format!("{}", signature))
} else if args.cmd_verify {
let signature = args.arg_signature.parse().map_err(|_| EthkeyError::InvalidSignature)?;
let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?;
let ok = if args.cmd_public {
let public = args.arg_public.parse().map_err(|_| EthkeyError::InvalidPublic)?;
verify_public(&public, &signature, &message)?
} else if args.cmd_address {
let address = args.arg_address.parse().map_err(|_| EthkeyError::InvalidAddress)?;
verify_address(&address, &signature, &message)?
} else {
return Ok(format!("{}", USAGE))
};
Ok(format!("{}", ok))
} else if args.cmd_recover {
let display_mode = DisplayMode::new(&args);
let known_phrase = args.arg_known_phrase;
let address = args.arg_address.parse().map_err(|_| EthkeyError::InvalidAddress)?;
let (phrase, keypair) = in_threads(move || {
let mut it = brain_recover::PhrasesIterator::from_known_phrase(&known_phrase, BRAIN_WORDS);
move || {
let mut i = 0;
while let Some(phrase) = it.next() {
i += 1;
Ok(res.map(Some).unwrap_or(None))
}
})?
} else {
return Ok(format!("{}", USAGE));
};
Ok(display(result, display_mode))
} else if args.cmd_sign {
let secret = args
.arg_secret
.parse()
.map_err(|_| EthkeyError::InvalidSecret)?;
let message = args
.arg_message
.parse()
.map_err(|_| EthkeyError::InvalidMessage)?;
let signature = sign(&secret, &message)?;
Ok(format!("{}", signature))
} else if args.cmd_verify {
let signature = args
.arg_signature
.parse()
.map_err(|_| EthkeyError::InvalidSignature)?;
let message = args
.arg_message
.parse()
.map_err(|_| EthkeyError::InvalidMessage)?;
let ok = if args.cmd_public {
let public = args
.arg_public
.parse()
.map_err(|_| EthkeyError::InvalidPublic)?;
verify_public(&public, &signature, &message)?
} else if args.cmd_address {
let address = args
.arg_address
.parse()
.map_err(|_| EthkeyError::InvalidAddress)?;
verify_address(&address, &signature, &message)?
} else {
return Ok(format!("{}", USAGE));
};
Ok(format!("{}", ok))
} else if args.cmd_recover {
let display_mode = DisplayMode::new(&args);
let known_phrase = args.arg_known_phrase;
let address = args
.arg_address
.parse()
.map_err(|_| EthkeyError::InvalidAddress)?;
let (phrase, keypair) = in_threads(move || {
let mut it =
brain_recover::PhrasesIterator::from_known_phrase(&known_phrase, BRAIN_WORDS);
move || {
let mut i = 0;
while let Some(phrase) = it.next() {
i += 1;
let keypair = Brain::new(phrase.clone()).generate().unwrap();
if keypair.address() == address {
return Ok(Some((phrase, keypair)))
}
let keypair = Brain::new(phrase.clone()).generate().unwrap();
if keypair.address() == address {
return Ok(Some((phrase, keypair)));
}
if i >= 1024 {
return Ok(None)
}
}
if i >= 1024 {
return Ok(None);
}
}
Err(EthkeyError::Custom("Couldn't find any results.".into()))
}
})?;
Ok(display((keypair, Some(phrase)), display_mode))
} else {
Ok(format!("{}", USAGE))
}
Err(EthkeyError::Custom("Couldn't find any results.".into()))
}
})?;
Ok(display((keypair, Some(phrase)), display_mode))
} else {
Ok(format!("{}", USAGE))
};
}
const BRAIN_WORDS: usize = 12;
fn validate_phrase(phrase: &str) -> String {
match Brain::validate_phrase(phrase, BRAIN_WORDS) {
Ok(()) => format!("The recovery phrase looks correct.\n"),
Err(err) => format!("The recover phrase was not generated by Parity: {}", err)
}
match Brain::validate_phrase(phrase, BRAIN_WORDS) {
Ok(()) => format!("The recovery phrase looks correct.\n"),
Err(err) => format!("The recover phrase was not generated by Parity: {}", err),
}
}
fn in_threads<F, X, O>(prepare: F) -> Result<O, EthkeyError> where
O: Send + 'static,
X: Send + 'static,
F: Fn() -> X,
X: FnMut() -> Result<Option<O>, EthkeyError>,
fn in_threads<F, X, O>(prepare: F) -> Result<O, EthkeyError>
where
O: Send + 'static,
X: Send + 'static,
F: Fn() -> X,
X: FnMut() -> Result<Option<O>, EthkeyError>,
{
let pool = threadpool::Builder::new().build();
let pool = threadpool::Builder::new().build();
let (tx, rx) = sync::mpsc::sync_channel(1);
let is_done = sync::Arc::new(sync::atomic::AtomicBool::default());
let (tx, rx) = sync::mpsc::sync_channel(1);
let is_done = sync::Arc::new(sync::atomic::AtomicBool::default());
for _ in 0..pool.max_count() {
let is_done = is_done.clone();
let tx = tx.clone();
let mut task = prepare();
pool.execute(move || {
loop {
if is_done.load(sync::atomic::Ordering::SeqCst) {
return;
}
for _ in 0..pool.max_count() {
let is_done = is_done.clone();
let tx = tx.clone();
let mut task = prepare();
pool.execute(move || {
loop {
if is_done.load(sync::atomic::Ordering::SeqCst) {
return;
}
let res = match task() {
Ok(None) => continue,
Ok(Some(v)) => Ok(v),
Err(err) => Err(err),
};
let res = match task() {
Ok(None) => continue,
Ok(Some(v)) => Ok(v),
Err(err) => Err(err),
};
// We are interested only in the first response.
let _ = tx.send(res);
}
});
}
// We are interested only in the first response.
let _ = tx.send(res);
}
});
}
if let Ok(solution) = rx.recv() {
is_done.store(true, sync::atomic::Ordering::SeqCst);
return solution;
}
if let Ok(solution) = rx.recv() {
is_done.store(true, sync::atomic::Ordering::SeqCst);
return solution;
}
Err(EthkeyError::Custom("No results found.".into()))
Err(EthkeyError::Custom("No results found.".into()))
}
#[cfg(test)]
mod tests {
use super::execute;
use super::execute;
#[test]
fn info() {
let command = vec!["ethkey", "info", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
#[test]
fn info() {
let command = vec![
"ethkey",
"info",
"17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55",
]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected =
let expected =
"secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55
public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124
address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn brain() {
let command = vec!["ethkey", "info", "--brain", "this is sparta"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
#[test]
fn brain() {
let command = vec!["ethkey", "info", "--brain", "this is sparta"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected =
let expected =
"The recover phrase was not generated by Parity: The word 'this' does not come from the dictionary.
secret: aa22b54c0cb43ee30a014afe5ef3664b1cde299feabca46cd3167a85a57c39f2
public: c4c5398da6843632c123f543d714d2d2277716c11ff612b2a2f23c6bda4d6f0327c31cd58c55a9572c3cc141dade0c32747a13b7ef34c241b26c84adbb28fcf4
address: 006e27b6a72e1f34c626762f3c4761547aff1421".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn secret() {
let command = vec!["ethkey", "info", "--brain", "this is sparta", "--secret"]
#[test]
fn secret() {
let command = vec!["ethkey", "info", "--brain", "this is sparta", "--secret"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected =
"aa22b54c0cb43ee30a014afe5ef3664b1cde299feabca46cd3167a85a57c39f2".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn public() {
let command = vec!["ethkey", "info", "--brain", "this is sparta", "--public"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "c4c5398da6843632c123f543d714d2d2277716c11ff612b2a2f23c6bda4d6f0327c31cd58c55a9572c3cc141dade0c32747a13b7ef34c241b26c84adbb28fcf4".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn address() {
let command = vec!["ethkey", "info", "-b", "this is sparta", "--address"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "006e27b6a72e1f34c626762f3c4761547aff1421".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn sign() {
let command = vec![
"ethkey",
"sign",
"17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55",
"bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987",
]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn verify_valid_public() {
let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "aa22b54c0cb43ee30a014afe5ef3664b1cde299feabca46cd3167a85a57c39f2".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
let expected = "true".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn public() {
let command = vec!["ethkey", "info", "--brain", "this is sparta", "--public"]
#[test]
fn verify_valid_address() {
let command = vec!["ethkey", "verify", "address", "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "c4c5398da6843632c123f543d714d2d2277716c11ff612b2a2f23c6bda4d6f0327c31cd58c55a9572c3cc141dade0c32747a13b7ef34c241b26c84adbb28fcf4".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
let expected = "true".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn address() {
let command = vec!["ethkey", "info", "-b", "this is sparta", "--address"]
#[test]
fn verify_invalid() {
let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec986"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "006e27b6a72e1f34c626762f3c4761547aff1421".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn sign() {
let command = vec!["ethkey", "sign", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn verify_valid_public() {
let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "true".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn verify_valid_address() {
let command = vec!["ethkey", "verify", "address", "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "true".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
#[test]
fn verify_invalid() {
let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec986"]
.into_iter()
.map(Into::into)
.collect::<Vec<String>>();
let expected = "false".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
let expected = "false".to_owned();
assert_eq!(execute(command).unwrap(), expected);
}
}

View File

@@ -14,60 +14,61 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use super::{Generator, KeyPair, Secret};
use keccak::Keccak256;
use super::{KeyPair, Generator, Secret};
use parity_wordlist;
/// Simple brainwallet.
pub struct Brain(String);
impl Brain {
pub fn new(s: String) -> Self {
Brain(s)
}
pub fn new(s: String) -> Self {
Brain(s)
}
pub fn validate_phrase(phrase: &str, expected_words: usize) -> Result<(), ::WordlistError> {
parity_wordlist::validate_phrase(phrase, expected_words)
}
pub fn validate_phrase(phrase: &str, expected_words: usize) -> Result<(), ::WordlistError> {
parity_wordlist::validate_phrase(phrase, expected_words)
}
}
impl Generator for Brain {
type Error = ::Void;
fn generate(&mut self) -> Result<KeyPair, Self::Error> {
let seed = self.0.clone();
let mut secret = seed.into_bytes().keccak256();
fn generate(&mut self) -> Result<KeyPair, Self::Error> {
let seed = self.0.clone();
let mut secret = seed.into_bytes().keccak256();
let mut i = 0;
loop {
secret = secret.keccak256();
let mut i = 0;
loop {
secret = secret.keccak256();
match i > 16384 {
false => i += 1,
true => {
if let Ok(pair) = Secret::from_unsafe_slice(&secret)
.and_then(KeyPair::from_secret)
{
if pair.address()[0] == 0 {
trace!("Testing: {}, got: {:?}", self.0, pair.address());
return Ok(pair)
}
}
},
}
}
}
match i > 16384 {
false => i += 1,
true => {
if let Ok(pair) =
Secret::from_unsafe_slice(&secret).and_then(KeyPair::from_secret)
{
if pair.address()[0] == 0 {
trace!("Testing: {}, got: {:?}", self.0, pair.address());
return Ok(pair);
}
}
}
}
}
}
}
#[cfg(test)]
mod tests {
use {Brain, Generator};
use Brain;
use Generator;
#[test]
fn test_brain() {
let words = "this is sparta!".to_owned();
let first_keypair = Brain::new(words.clone()).generate().unwrap();
let second_keypair = Brain::new(words.clone()).generate().unwrap();
assert_eq!(first_keypair.secret(), second_keypair.secret());
}
#[test]
fn test_brain() {
let words = "this is sparta!".to_owned();
let first_keypair = Brain::new(words.clone()).generate().unwrap();
let second_keypair = Brain::new(words.clone()).generate().unwrap();
assert_eq!(first_keypair.secret(), second_keypair.secret());
}
}

View File

@@ -14,57 +14,60 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use super::{Generator, KeyPair, Error, Brain};
use super::{Brain, Error, Generator, KeyPair};
use parity_wordlist as wordlist;
/// Tries to find brain-seed keypair with address starting with given prefix.
pub struct BrainPrefix {
prefix: Vec<u8>,
iterations: usize,
no_of_words: usize,
last_phrase: String,
prefix: Vec<u8>,
iterations: usize,
no_of_words: usize,
last_phrase: String,
}
impl BrainPrefix {
pub fn new(prefix: Vec<u8>, iterations: usize, no_of_words: usize) -> Self {
BrainPrefix {
prefix,
iterations,
no_of_words,
last_phrase: String::new(),
}
}
pub fn new(prefix: Vec<u8>, iterations: usize, no_of_words: usize) -> Self {
BrainPrefix {
prefix,
iterations,
no_of_words,
last_phrase: String::new(),
}
}
pub fn phrase(&self) -> &str {
&self.last_phrase
}
pub fn phrase(&self) -> &str {
&self.last_phrase
}
}
impl Generator for BrainPrefix {
type Error = Error;
type Error = Error;
fn generate(&mut self) -> Result<KeyPair, Error> {
for _ in 0..self.iterations {
let phrase = wordlist::random_phrase(self.no_of_words);
let keypair = Brain::new(phrase.clone()).generate().unwrap();
if keypair.address().starts_with(&self.prefix) {
self.last_phrase = phrase;
return Ok(keypair)
}
}
fn generate(&mut self) -> Result<KeyPair, Error> {
for _ in 0..self.iterations {
let phrase = wordlist::random_phrase(self.no_of_words);
let keypair = Brain::new(phrase.clone()).generate().unwrap();
if keypair.address().starts_with(&self.prefix) {
self.last_phrase = phrase;
return Ok(keypair);
}
}
Err(Error::Custom("Could not find keypair".into()))
}
Err(Error::Custom("Could not find keypair".into()))
}
}
#[cfg(test)]
mod tests {
use {Generator, BrainPrefix};
use BrainPrefix;
use Generator;
#[test]
fn prefix_generator() {
let prefix = vec![0x00u8];
let keypair = BrainPrefix::new(prefix.clone(), usize::max_value(), 12).generate().unwrap();
assert!(keypair.address().starts_with(&prefix));
}
#[test]
fn prefix_generator() {
let prefix = vec![0x00u8];
let keypair = BrainPrefix::new(prefix.clone(), usize::max_value(), 12)
.generate()
.unwrap();
assert!(keypair.address().starts_with(&prefix));
}
}

View File

@@ -26,148 +26,153 @@ use super::{Address, Brain, Generator};
///
/// Returns `None` if phrase couldn't be found.
pub fn brain_recover(
address: &Address,
known_phrase: &str,
expected_words: usize,
address: &Address,
known_phrase: &str,
expected_words: usize,
) -> Option<String> {
let it = PhrasesIterator::from_known_phrase(known_phrase, expected_words);
for phrase in it {
let keypair = Brain::new(phrase.clone()).generate().expect("Brain wallets are infallible; qed");
trace!("Testing: {}, got: {:?}", phrase, keypair.address());
if &keypair.address() == address {
return Some(phrase);
}
}
let it = PhrasesIterator::from_known_phrase(known_phrase, expected_words);
for phrase in it {
let keypair = Brain::new(phrase.clone())
.generate()
.expect("Brain wallets are infallible; qed");
trace!("Testing: {}, got: {:?}", phrase, keypair.address());
if &keypair.address() == address {
return Some(phrase);
}
}
None
None
}
fn generate_substitutions(word: &str) -> Vec<&'static str> {
let mut words = parity_wordlist::WORDS.iter().cloned()
.map(|w| (edit_distance(w, word), w))
.collect::<Vec<_>>();
words.sort_by(|a, b| a.0.cmp(&b.0));
let mut words = parity_wordlist::WORDS
.iter()
.cloned()
.map(|w| (edit_distance(w, word), w))
.collect::<Vec<_>>();
words.sort_by(|a, b| a.0.cmp(&b.0));
words.into_iter()
.map(|pair| pair.1)
.collect()
words.into_iter().map(|pair| pair.1).collect()
}
/// Iterator over possible phrases
pub struct PhrasesIterator {
words: Vec<Vec<&'static str>>,
combinations: u64,
indexes: Vec<usize>,
has_next: bool,
words: Vec<Vec<&'static str>>,
combinations: u64,
indexes: Vec<usize>,
has_next: bool,
}
impl PhrasesIterator {
pub fn from_known_phrase(known_phrase: &str, expected_words: usize) -> Self {
let known_words = parity_wordlist::WORDS.iter().cloned().collect::<HashSet<_>>();
let mut words = known_phrase.split(' ')
.map(|word| match known_words.get(word) {
None => {
info!("Invalid word '{}', looking for potential substitutions.", word);
let substitutions = generate_substitutions(word);
info!("Closest words: {:?}", &substitutions[..10]);
substitutions
},
Some(word) => vec![*word],
})
.collect::<Vec<_>>();
pub fn from_known_phrase(known_phrase: &str, expected_words: usize) -> Self {
let known_words = parity_wordlist::WORDS
.iter()
.cloned()
.collect::<HashSet<_>>();
let mut words = known_phrase
.split(' ')
.map(|word| match known_words.get(word) {
None => {
info!(
"Invalid word '{}', looking for potential substitutions.",
word
);
let substitutions = generate_substitutions(word);
info!("Closest words: {:?}", &substitutions[..10]);
substitutions
}
Some(word) => vec![*word],
})
.collect::<Vec<_>>();
// add missing words
if words.len() < expected_words {
let to_add = expected_words - words.len();
info!("Number of words is insuficcient adding {} more.", to_add);
for _ in 0..to_add {
words.push(parity_wordlist::WORDS.iter().cloned().collect());
}
}
// add missing words
if words.len() < expected_words {
let to_add = expected_words - words.len();
info!("Number of words is insuficcient adding {} more.", to_add);
for _ in 0..to_add {
words.push(parity_wordlist::WORDS.iter().cloned().collect());
}
}
// start searching
PhrasesIterator::new(words)
}
// start searching
PhrasesIterator::new(words)
}
pub fn new(words: Vec<Vec<&'static str>>) -> Self {
let combinations = words.iter().fold(1u64, |acc, x| acc * x.len() as u64);
let indexes = words.iter().map(|_| 0).collect();
info!("Starting to test {} possible combinations.", combinations);
pub fn new(words: Vec<Vec<&'static str>>) -> Self {
let combinations = words.iter().fold(1u64, |acc, x| acc * x.len() as u64);
let indexes = words.iter().map(|_| 0).collect();
info!("Starting to test {} possible combinations.", combinations);
PhrasesIterator {
words,
combinations,
indexes,
has_next: combinations > 0,
}
}
PhrasesIterator {
words,
combinations,
indexes,
has_next: combinations > 0,
}
}
pub fn combinations(&self) -> u64 {
self.combinations
}
pub fn combinations(&self) -> u64 {
self.combinations
}
fn current(&self) -> String {
let mut s = self.words[0][self.indexes[0]].to_owned();
for i in 1..self.indexes.len() {
s.push(' ');
s.push_str(self.words[i][self.indexes[i]]);
}
s
}
fn current(&self) -> String {
let mut s = self.words[0][self.indexes[0]].to_owned();
for i in 1..self.indexes.len() {
s.push(' ');
s.push_str(self.words[i][self.indexes[i]]);
}
s
}
fn next_index(&mut self) -> bool {
let mut pos = self.indexes.len();
while pos > 0 {
pos -= 1;
self.indexes[pos] += 1;
if self.indexes[pos] >= self.words[pos].len() {
self.indexes[pos] = 0;
} else {
return true;
}
}
fn next_index(&mut self) -> bool {
let mut pos = self.indexes.len();
while pos > 0 {
pos -= 1;
self.indexes[pos] += 1;
if self.indexes[pos] >= self.words[pos].len() {
self.indexes[pos] = 0;
} else {
return true;
}
}
false
}
false
}
}
impl Iterator for PhrasesIterator {
type Item = String;
type Item = String;
fn next(&mut self) -> Option<String> {
if !self.has_next {
return None;
}
fn next(&mut self) -> Option<String> {
if !self.has_next {
return None;
}
let phrase = self.current();
self.has_next = self.next_index();
Some(phrase)
}
let phrase = self.current();
self.has_next = self.next_index();
Some(phrase)
}
}
#[cfg(test)]
mod tests {
use super::PhrasesIterator;
use super::PhrasesIterator;
#[test]
fn should_generate_possible_combinations() {
let mut it = PhrasesIterator::new(vec![
vec!["1", "2", "3"],
vec!["test"],
vec!["a", "b", "c"],
]);
assert_eq!(it.combinations(), 9);
assert_eq!(it.next(), Some("1 test a".to_owned()));
assert_eq!(it.next(), Some("1 test b".to_owned()));
assert_eq!(it.next(), Some("1 test c".to_owned()));
assert_eq!(it.next(), Some("2 test a".to_owned()));
assert_eq!(it.next(), Some("2 test b".to_owned()));
assert_eq!(it.next(), Some("2 test c".to_owned()));
assert_eq!(it.next(), Some("3 test a".to_owned()));
assert_eq!(it.next(), Some("3 test b".to_owned()));
assert_eq!(it.next(), Some("3 test c".to_owned()));
assert_eq!(it.next(), None);
}
#[test]
fn should_generate_possible_combinations() {
let mut it =
PhrasesIterator::new(vec![vec!["1", "2", "3"], vec!["test"], vec!["a", "b", "c"]]);
assert_eq!(it.combinations(), 9);
assert_eq!(it.next(), Some("1 test a".to_owned()));
assert_eq!(it.next(), Some("1 test b".to_owned()));
assert_eq!(it.next(), Some("1 test c".to_owned()));
assert_eq!(it.next(), Some("2 test a".to_owned()));
assert_eq!(it.next(), Some("2 test b".to_owned()));
assert_eq!(it.next(), Some("2 test c".to_owned()));
assert_eq!(it.next(), Some("3 test a".to_owned()));
assert_eq!(it.next(), Some("3 test b".to_owned()));
assert_eq!(it.next(), Some("3 test c".to_owned()));
assert_eq!(it.next(), None);
}
}

View File

@@ -14,176 +14,187 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use parity_crypto::error::SymmError;
use secp256k1;
use std::io;
use parity_crypto::error::SymmError;
quick_error! {
#[derive(Debug)]
pub enum Error {
Secp(e: secp256k1::Error) {
display("secp256k1 error: {}", e)
cause(e)
from()
}
Io(e: io::Error) {
display("i/o error: {}", e)
cause(e)
from()
}
InvalidMessage {
display("invalid message")
}
Symm(e: SymmError) {
cause(e)
from()
}
}
#[derive(Debug)]
pub enum Error {
Secp(e: secp256k1::Error) {
display("secp256k1 error: {}", e)
cause(e)
from()
}
Io(e: io::Error) {
display("i/o error: {}", e)
cause(e)
from()
}
InvalidMessage {
display("invalid message")
}
Symm(e: SymmError) {
cause(e)
from()
}
}
}
/// ECDH functions
pub mod ecdh {
use secp256k1::{self, ecdh, key};
use super::Error;
use {Secret, Public, SECP256K1};
use super::Error;
use secp256k1::{self, ecdh, key};
use Public;
use Secret;
use SECP256K1;
/// Agree on a shared secret
pub fn agree(secret: &Secret, public: &Public) -> Result<Secret, Error> {
let context = &SECP256K1;
let pdata = {
let mut temp = [4u8; 65];
(&mut temp[1..65]).copy_from_slice(&public[0..64]);
temp
};
/// Agree on a shared secret
pub fn agree(secret: &Secret, public: &Public) -> Result<Secret, Error> {
let context = &SECP256K1;
let pdata = {
let mut temp = [4u8; 65];
(&mut temp[1..65]).copy_from_slice(&public[0..64]);
temp
};
let publ = key::PublicKey::from_slice(context, &pdata)?;
let sec = key::SecretKey::from_slice(context, &secret)?;
let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec);
let publ = key::PublicKey::from_slice(context, &pdata)?;
let sec = key::SecretKey::from_slice(context, &secret)?;
let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec);
Secret::from_unsafe_slice(&shared[0..32])
.map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey))
}
Secret::from_unsafe_slice(&shared[0..32])
.map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey))
}
}
/// ECIES function
pub mod ecies {
use parity_crypto::{aes, digest, hmac, is_equal};
use ethereum_types::H128;
use super::{ecdh, Error};
use {Random, Generator, Public, Secret};
use super::{ecdh, Error};
use ethereum_types::H128;
use parity_crypto::{aes, digest, hmac, is_equal};
use Generator;
use Public;
use Random;
use Secret;
/// Encrypt a message with a public key, writing an HMAC covering both
/// the plaintext and authenticated data.
///
/// Authenticated data may be empty.
pub fn encrypt(public: &Public, auth_data: &[u8], plain: &[u8]) -> Result<Vec<u8>, Error> {
let r = Random.generate()?;
let z = ecdh::agree(r.secret(), public)?;
let mut key = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
/// Encrypt a message with a public key, writing an HMAC covering both
/// the plaintext and authenticated data.
///
/// Authenticated data may be empty.
pub fn encrypt(public: &Public, auth_data: &[u8], plain: &[u8]) -> Result<Vec<u8>, Error> {
let r = Random.generate()?;
let z = ecdh::agree(r.secret(), public)?;
let mut key = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
let ekey = &key[0..16];
let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32]));
let ekey = &key[0..16];
let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32]));
let mut msg = vec![0u8; 1 + 64 + 16 + plain.len() + 32];
msg[0] = 0x04u8;
{
let msgd = &mut msg[1..];
msgd[0..64].copy_from_slice(r.public());
let iv = H128::random();
msgd[64..80].copy_from_slice(&iv);
{
let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())];
aes::encrypt_128_ctr(ekey, &iv, plain, cipher)?;
}
let mut hmac = hmac::Signer::with(&mkey);
{
let cipher_iv = &msgd[64..(64 + 16 + plain.len())];
hmac.update(cipher_iv);
}
hmac.update(auth_data);
let sig = hmac.sign();
msgd[(64 + 16 + plain.len())..].copy_from_slice(&sig);
}
Ok(msg)
}
let mut msg = vec![0u8; 1 + 64 + 16 + plain.len() + 32];
msg[0] = 0x04u8;
{
let msgd = &mut msg[1..];
msgd[0..64].copy_from_slice(r.public());
let iv = H128::random();
msgd[64..80].copy_from_slice(&iv);
{
let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())];
aes::encrypt_128_ctr(ekey, &iv, plain, cipher)?;
}
let mut hmac = hmac::Signer::with(&mkey);
{
let cipher_iv = &msgd[64..(64 + 16 + plain.len())];
hmac.update(cipher_iv);
}
hmac.update(auth_data);
let sig = hmac.sign();
msgd[(64 + 16 + plain.len())..].copy_from_slice(&sig);
}
Ok(msg)
}
/// Decrypt a message with a secret key, checking HMAC for ciphertext
/// and authenticated data validity.
pub fn decrypt(secret: &Secret, auth_data: &[u8], encrypted: &[u8]) -> Result<Vec<u8>, Error> {
let meta_len = 1 + 64 + 16 + 32;
if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 {
return Err(Error::InvalidMessage); //invalid message: publickey
}
/// Decrypt a message with a secret key, checking HMAC for ciphertext
/// and authenticated data validity.
pub fn decrypt(secret: &Secret, auth_data: &[u8], encrypted: &[u8]) -> Result<Vec<u8>, Error> {
let meta_len = 1 + 64 + 16 + 32;
if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 {
return Err(Error::InvalidMessage); //invalid message: publickey
}
let e = &encrypted[1..];
let p = Public::from_slice(&e[0..64]);
let z = ecdh::agree(secret, &p)?;
let mut key = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
let e = &encrypted[1..];
let p = Public::from_slice(&e[0..64]);
let z = ecdh::agree(secret, &p)?;
let mut key = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
let ekey = &key[0..16];
let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32]));
let ekey = &key[0..16];
let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32]));
let clen = encrypted.len() - meta_len;
let cipher_with_iv = &e[64..(64+16+clen)];
let cipher_iv = &cipher_with_iv[0..16];
let cipher_no_iv = &cipher_with_iv[16..];
let msg_mac = &e[(64+16+clen)..];
let clen = encrypted.len() - meta_len;
let cipher_with_iv = &e[64..(64 + 16 + clen)];
let cipher_iv = &cipher_with_iv[0..16];
let cipher_no_iv = &cipher_with_iv[16..];
let msg_mac = &e[(64 + 16 + clen)..];
// Verify tag
let mut hmac = hmac::Signer::with(&mkey);
hmac.update(cipher_with_iv);
hmac.update(auth_data);
let mac = hmac.sign();
// Verify tag
let mut hmac = hmac::Signer::with(&mkey);
hmac.update(cipher_with_iv);
hmac.update(auth_data);
let mac = hmac.sign();
if !is_equal(&mac.as_ref()[..], msg_mac) {
return Err(Error::InvalidMessage);
}
if !is_equal(&mac.as_ref()[..], msg_mac) {
return Err(Error::InvalidMessage);
}
let mut msg = vec![0u8; clen];
aes::decrypt_128_ctr(ekey, cipher_iv, cipher_no_iv, &mut msg[..])?;
Ok(msg)
}
let mut msg = vec![0u8; clen];
aes::decrypt_128_ctr(ekey, cipher_iv, cipher_no_iv, &mut msg[..])?;
Ok(msg)
}
fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) {
// SEC/ISO/Shoup specify counter size SHOULD be equivalent
// to size of hash output, however, it also notes that
// the 4 bytes is okay. NIST specifies 4 bytes.
let mut ctr = 1u32;
let mut written = 0usize;
while written < dest.len() {
let mut hasher = digest::Hasher::sha256();
let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8];
hasher.update(&ctrs);
hasher.update(secret);
hasher.update(s1);
let d = hasher.finish();
&mut dest[written..(written + 32)].copy_from_slice(&d);
written += 32;
ctr += 1;
}
}
fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) {
// SEC/ISO/Shoup specify counter size SHOULD be equivalent
// to size of hash output, however, it also notes that
// the 4 bytes is okay. NIST specifies 4 bytes.
let mut ctr = 1u32;
let mut written = 0usize;
while written < dest.len() {
let mut hasher = digest::Hasher::sha256();
let ctrs = [
(ctr >> 24) as u8,
(ctr >> 16) as u8,
(ctr >> 8) as u8,
ctr as u8,
];
hasher.update(&ctrs);
hasher.update(secret);
hasher.update(s1);
let d = hasher.finish();
&mut dest[written..(written + 32)].copy_from_slice(&d);
written += 32;
ctr += 1;
}
}
}
#[cfg(test)]
mod tests {
use super::ecies;
use {Random, Generator};
use super::ecies;
use Generator;
use Random;
#[test]
fn ecies_shared() {
let kp = Random.generate().unwrap();
let message = b"So many books, so little time";
#[test]
fn ecies_shared() {
let kp = Random.generate().unwrap();
let message = b"So many books, so little time";
let shared = b"shared";
let wrong_shared = b"incorrect";
let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap();
assert!(encrypted[..] != message[..]);
assert_eq!(encrypted[0], 0x04);
let shared = b"shared";
let wrong_shared = b"incorrect";
let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap();
assert!(encrypted[..] != message[..]);
assert_eq!(encrypted[0], 0x04);
assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err());
let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap();
assert_eq!(decrypted[..message.len()], message[..]);
}
assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err());
let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap();
assert_eq!(decrypted[..message.len()], message[..]);
}
}

View File

@@ -14,68 +14,68 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::{fmt, error};
use std::{error, fmt};
#[derive(Debug)]
/// Crypto error
pub enum Error {
/// Invalid secret key
InvalidSecret,
/// Invalid public key
InvalidPublic,
/// Invalid address
InvalidAddress,
/// Invalid EC signature
InvalidSignature,
/// Invalid AES message
InvalidMessage,
/// IO Error
Io(::std::io::Error),
/// Custom
Custom(String),
/// Invalid secret key
InvalidSecret,
/// Invalid public key
InvalidPublic,
/// Invalid address
InvalidAddress,
/// Invalid EC signature
InvalidSignature,
/// Invalid AES message
InvalidMessage,
/// IO Error
Io(::std::io::Error),
/// Custom
Custom(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let msg = match *self {
Error::InvalidSecret => "Invalid secret".into(),
Error::InvalidPublic => "Invalid public".into(),
Error::InvalidAddress => "Invalid address".into(),
Error::InvalidSignature => "Invalid EC signature".into(),
Error::InvalidMessage => "Invalid AES message".into(),
Error::Io(ref err) => format!("I/O error: {}", err),
Error::Custom(ref s) => s.clone(),
};
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let msg = match *self {
Error::InvalidSecret => "Invalid secret".into(),
Error::InvalidPublic => "Invalid public".into(),
Error::InvalidAddress => "Invalid address".into(),
Error::InvalidSignature => "Invalid EC signature".into(),
Error::InvalidMessage => "Invalid AES message".into(),
Error::Io(ref err) => format!("I/O error: {}", err),
Error::Custom(ref s) => s.clone(),
};
f.write_fmt(format_args!("Crypto error ({})", msg))
}
f.write_fmt(format_args!("Crypto error ({})", msg))
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"Crypto error"
}
fn description(&self) -> &str {
"Crypto error"
}
}
impl Into<String> for Error {
fn into(self) -> String {
format!("{}", self)
}
fn into(self) -> String {
format!("{}", self)
}
}
impl From<::secp256k1::Error> for Error {
fn from(e: ::secp256k1::Error) -> Error {
match e {
::secp256k1::Error::InvalidMessage => Error::InvalidMessage,
::secp256k1::Error::InvalidPublicKey => Error::InvalidPublic,
::secp256k1::Error::InvalidSecretKey => Error::InvalidSecret,
_ => Error::InvalidSignature,
}
}
fn from(e: ::secp256k1::Error) -> Error {
match e {
::secp256k1::Error::InvalidMessage => Error::InvalidMessage,
::secp256k1::Error::InvalidPublicKey => Error::InvalidPublic,
::secp256k1::Error::InvalidSecretKey => Error::InvalidSecret,
_ => Error::InvalidSignature,
}
}
}
impl From<::std::io::Error> for Error {
fn from(err: ::std::io::Error) -> Error {
Error::Io(err)
}
fn from(err: ::std::io::Error) -> Error {
Error::Io(err)
}
}

View File

@@ -16,485 +16,574 @@
//! Extended keys
pub use self::derivation::Error as DerivationError;
use ethereum_types::H256;
use secret::Secret;
use Public;
use ethereum_types::H256;
pub use self::derivation::Error as DerivationError;
/// Represents label that can be stored as a part of key derivation
pub trait Label {
/// Length of the data that label occupies
fn len() -> usize;
/// Length of the data that label occupies
fn len() -> usize;
/// Store label data to the key derivation sequence
/// Must not use more than `len()` bytes from slice
fn store(&self, target: &mut [u8]);
/// Store label data to the key derivation sequence
/// Must not use more than `len()` bytes from slice
fn store(&self, target: &mut [u8]);
}
impl Label for u32 {
fn len() -> usize { 4 }
fn len() -> usize {
4
}
fn store(&self, target: &mut [u8]) {
let bytes = self.to_be_bytes();
target[0..4].copy_from_slice(&bytes);
}
fn store(&self, target: &mut [u8]) {
let bytes = self.to_be_bytes();
target[0..4].copy_from_slice(&bytes);
}
}
/// Key derivation over generic label `T`
pub enum Derivation<T: Label> {
/// Soft key derivation (allow proof of parent)
Soft(T),
/// Hard key derivation (does not allow proof of parent)
Hard(T),
/// Soft key derivation (allow proof of parent)
Soft(T),
/// Hard key derivation (does not allow proof of parent)
Hard(T),
}
impl From<u32> for Derivation<u32> {
fn from(index: u32) -> Self {
if index < (2 << 30) {
Derivation::Soft(index)
}
else {
Derivation::Hard(index)
}
}
fn from(index: u32) -> Self {
if index < (2 << 30) {
Derivation::Soft(index)
} else {
Derivation::Hard(index)
}
}
}
impl Label for H256 {
fn len() -> usize { 32 }
fn len() -> usize {
32
}
fn store(&self, target: &mut [u8]) {
self.copy_to(&mut target[0..32]);
}
fn store(&self, target: &mut [u8]) {
self.copy_to(&mut target[0..32]);
}
}
/// Extended secret key, allows deterministic derivation of subsequent keys.
pub struct ExtendedSecret {
secret: Secret,
chain_code: H256,
secret: Secret,
chain_code: H256,
}
impl ExtendedSecret {
/// New extended key from given secret and chain code.
pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret {
ExtendedSecret {
secret: secret,
chain_code: chain_code,
}
}
/// New extended key from given secret and chain code.
pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret {
ExtendedSecret {
secret: secret,
chain_code: chain_code,
}
}
/// New extended key from given secret with the random chain code.
pub fn new_random(secret: Secret) -> ExtendedSecret {
ExtendedSecret::with_code(secret, H256::random())
}
/// New extended key from given secret with the random chain code.
pub fn new_random(secret: Secret) -> ExtendedSecret {
ExtendedSecret::with_code(secret, H256::random())
}
/// New extended key from given secret.
/// Chain code will be derived from the secret itself (in a deterministic way).
pub fn new(secret: Secret) -> ExtendedSecret {
let chain_code = derivation::chain_code(*secret);
ExtendedSecret::with_code(secret, chain_code)
}
/// New extended key from given secret.
/// Chain code will be derived from the secret itself (in a deterministic way).
pub fn new(secret: Secret) -> ExtendedSecret {
let chain_code = derivation::chain_code(*secret);
ExtendedSecret::with_code(secret, chain_code)
}
/// Derive new private key
pub fn derive<T>(&self, index: Derivation<T>) -> ExtendedSecret where T: Label {
let (derived_key, next_chain_code) = derivation::private(*self.secret, self.chain_code, index);
/// Derive new private key
pub fn derive<T>(&self, index: Derivation<T>) -> ExtendedSecret
where
T: Label,
{
let (derived_key, next_chain_code) =
derivation::private(*self.secret, self.chain_code, index);
let derived_secret = Secret::from(derived_key.0);
let derived_secret = Secret::from(derived_key.0);
ExtendedSecret::with_code(derived_secret, next_chain_code)
}
ExtendedSecret::with_code(derived_secret, next_chain_code)
}
/// Private key component of the extended key.
pub fn as_raw(&self) -> &Secret {
&self.secret
}
/// Private key component of the extended key.
pub fn as_raw(&self) -> &Secret {
&self.secret
}
}
/// Extended public key, allows deterministic derivation of subsequent keys.
pub struct ExtendedPublic {
public: Public,
chain_code: H256,
public: Public,
chain_code: H256,
}
impl ExtendedPublic {
/// New extended public key from known parent and chain code
pub fn new(public: Public, chain_code: H256) -> Self {
ExtendedPublic { public: public, chain_code: chain_code }
}
/// New extended public key from known parent and chain code
pub fn new(public: Public, chain_code: H256) -> Self {
ExtendedPublic {
public: public,
chain_code: chain_code,
}
}
/// Create new extended public key from known secret
pub fn from_secret(secret: &ExtendedSecret) -> Result<Self, DerivationError> {
Ok(
ExtendedPublic::new(
derivation::point(**secret.as_raw())?,
secret.chain_code.clone(),
)
)
}
/// Create new extended public key from known secret
pub fn from_secret(secret: &ExtendedSecret) -> Result<Self, DerivationError> {
Ok(ExtendedPublic::new(
derivation::point(**secret.as_raw())?,
secret.chain_code.clone(),
))
}
/// Derive new public key
/// Operation is defined only for index belongs [0..2^31)
pub fn derive<T>(&self, index: Derivation<T>) -> Result<Self, DerivationError> where T: Label {
let (derived_key, next_chain_code) = derivation::public(self.public, self.chain_code, index)?;
Ok(ExtendedPublic::new(derived_key, next_chain_code))
}
/// Derive new public key
/// Operation is defined only for index belongs [0..2^31)
pub fn derive<T>(&self, index: Derivation<T>) -> Result<Self, DerivationError>
where
T: Label,
{
let (derived_key, next_chain_code) =
derivation::public(self.public, self.chain_code, index)?;
Ok(ExtendedPublic::new(derived_key, next_chain_code))
}
pub fn public(&self) -> &Public {
&self.public
}
pub fn public(&self) -> &Public {
&self.public
}
}
pub struct ExtendedKeyPair {
secret: ExtendedSecret,
public: ExtendedPublic,
secret: ExtendedSecret,
public: ExtendedPublic,
}
impl ExtendedKeyPair {
pub fn new(secret: Secret) -> Self {
let extended_secret = ExtendedSecret::new(secret);
let extended_public = ExtendedPublic::from_secret(&extended_secret)
.expect("Valid `Secret` always produces valid public; qed");
ExtendedKeyPair {
secret: extended_secret,
public: extended_public,
}
}
pub fn new(secret: Secret) -> Self {
let extended_secret = ExtendedSecret::new(secret);
let extended_public = ExtendedPublic::from_secret(&extended_secret)
.expect("Valid `Secret` always produces valid public; qed");
ExtendedKeyPair {
secret: extended_secret,
public: extended_public,
}
}
pub fn with_code(secret: Secret, public: Public, chain_code: H256) -> Self {
ExtendedKeyPair {
secret: ExtendedSecret::with_code(secret, chain_code.clone()),
public: ExtendedPublic::new(public, chain_code),
}
}
pub fn with_code(secret: Secret, public: Public, chain_code: H256) -> Self {
ExtendedKeyPair {
secret: ExtendedSecret::with_code(secret, chain_code.clone()),
public: ExtendedPublic::new(public, chain_code),
}
}
pub fn with_secret(secret: Secret, chain_code: H256) -> Self {
let extended_secret = ExtendedSecret::with_code(secret, chain_code);
let extended_public = ExtendedPublic::from_secret(&extended_secret)
.expect("Valid `Secret` always produces valid public; qed");
ExtendedKeyPair {
secret: extended_secret,
public: extended_public,
}
}
pub fn with_secret(secret: Secret, chain_code: H256) -> Self {
let extended_secret = ExtendedSecret::with_code(secret, chain_code);
let extended_public = ExtendedPublic::from_secret(&extended_secret)
.expect("Valid `Secret` always produces valid public; qed");
ExtendedKeyPair {
secret: extended_secret,
public: extended_public,
}
}
pub fn with_seed(seed: &[u8]) -> Result<ExtendedKeyPair, DerivationError> {
let (master_key, chain_code) = derivation::seed_pair(seed);
Ok(ExtendedKeyPair::with_secret(
Secret::from_unsafe_slice(&*master_key).map_err(|_| DerivationError::InvalidSeed)?,
chain_code,
))
}
pub fn with_seed(seed: &[u8]) -> Result<ExtendedKeyPair, DerivationError> {
let (master_key, chain_code) = derivation::seed_pair(seed);
Ok(ExtendedKeyPair::with_secret(
Secret::from_unsafe_slice(&*master_key).map_err(|_| DerivationError::InvalidSeed)?,
chain_code,
))
}
pub fn secret(&self) -> &ExtendedSecret {
&self.secret
}
pub fn secret(&self) -> &ExtendedSecret {
&self.secret
}
pub fn public(&self) -> &ExtendedPublic {
&self.public
}
pub fn public(&self) -> &ExtendedPublic {
&self.public
}
pub fn derive<T>(&self, index: Derivation<T>) -> Result<Self, DerivationError> where T: Label {
let derived = self.secret.derive(index);
pub fn derive<T>(&self, index: Derivation<T>) -> Result<Self, DerivationError>
where
T: Label,
{
let derived = self.secret.derive(index);
Ok(ExtendedKeyPair {
public: ExtendedPublic::from_secret(&derived)?,
secret: derived,
})
}
Ok(ExtendedKeyPair {
public: ExtendedPublic::from_secret(&derived)?,
secret: derived,
})
}
}
// Derivation functions for private and public keys
// Work is based on BIP0032
// https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
mod derivation {
use parity_crypto::hmac;
use ethereum_types::{U256, U512, H512, H256};
use secp256k1::key::{SecretKey, PublicKey};
use SECP256K1;
use keccak;
use math::curve_order;
use super::{Label, Derivation};
use super::{Derivation, Label};
use ethereum_types::{H256, H512, U256, U512};
use keccak;
use math::curve_order;
use parity_crypto::hmac;
use secp256k1::key::{PublicKey, SecretKey};
use SECP256K1;
#[derive(Debug)]
pub enum Error {
InvalidHardenedUse,
InvalidPoint,
MissingIndex,
InvalidSeed,
}
#[derive(Debug)]
pub enum Error {
InvalidHardenedUse,
InvalidPoint,
MissingIndex,
InvalidSeed,
}
// Deterministic derivation of the key using secp256k1 elliptic curve.
// Derivation can be either hardened or not.
// For hardened derivation, pass u32 index at least 2^31 or custom Derivation::Hard(T) enum
//
// Can panic if passed `private_key` is not a valid secp256k1 private key
// (outside of (0..curve_order()]) field
pub fn private<T>(private_key: H256, chain_code: H256, index: Derivation<T>) -> (H256, H256) where T: Label {
match index {
Derivation::Soft(index) => private_soft(private_key, chain_code, index),
Derivation::Hard(index) => private_hard(private_key, chain_code, index),
}
}
// Deterministic derivation of the key using secp256k1 elliptic curve.
// Derivation can be either hardened or not.
// For hardened derivation, pass u32 index at least 2^31 or custom Derivation::Hard(T) enum
//
// Can panic if passed `private_key` is not a valid secp256k1 private key
// (outside of (0..curve_order()]) field
pub fn private<T>(private_key: H256, chain_code: H256, index: Derivation<T>) -> (H256, H256)
where
T: Label,
{
match index {
Derivation::Soft(index) => private_soft(private_key, chain_code, index),
Derivation::Hard(index) => private_hard(private_key, chain_code, index),
}
}
fn hmac_pair(data: &[u8], private_key: H256, chain_code: H256) -> (H256, H256) {
let private: U256 = private_key.into();
fn hmac_pair(data: &[u8], private_key: H256, chain_code: H256) -> (H256, H256) {
let private: U256 = private_key.into();
// produces 512-bit derived hmac (I)
let skey = hmac::SigKey::sha512(&*chain_code);
let i_512 = hmac::sign(&skey, &data[..]);
// produces 512-bit derived hmac (I)
let skey = hmac::SigKey::sha512(&*chain_code);
let i_512 = hmac::sign(&skey, &data[..]);
// left most 256 bits are later added to original private key
let hmac_key: U256 = H256::from_slice(&i_512[0..32]).into();
// right most 256 bits are new chain code for later derivations
let next_chain_code = H256::from(&i_512[32..64]);
// left most 256 bits are later added to original private key
let hmac_key: U256 = H256::from_slice(&i_512[0..32]).into();
// right most 256 bits are new chain code for later derivations
let next_chain_code = H256::from(&i_512[32..64]);
let child_key = private_add(hmac_key, private).into();
(child_key, next_chain_code)
}
let child_key = private_add(hmac_key, private).into();
(child_key, next_chain_code)
}
// Can panic if passed `private_key` is not a valid secp256k1 private key
// (outside of (0..curve_order()]) field
fn private_soft<T>(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label {
let mut data = vec![0u8; 33 + T::len()];
// Can panic if passed `private_key` is not a valid secp256k1 private key
// (outside of (0..curve_order()]) field
fn private_soft<T>(private_key: H256, chain_code: H256, index: T) -> (H256, H256)
where
T: Label,
{
let mut data = vec![0u8; 33 + T::len()];
let sec_private = SecretKey::from_slice(&SECP256K1, &*private_key)
.expect("Caller should provide valid private key");
let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private)
.expect("Caller should provide valid private key");
let public_serialized = sec_public.serialize_vec(&SECP256K1, true);
let sec_private = SecretKey::from_slice(&SECP256K1, &*private_key)
.expect("Caller should provide valid private key");
let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private)
.expect("Caller should provide valid private key");
let public_serialized = sec_public.serialize_vec(&SECP256K1, true);
// curve point (compressed public key) -- index
// 0..33 -- 33..end
data[0..33].copy_from_slice(&public_serialized);
index.store(&mut data[33..]);
// curve point (compressed public key) -- index
// 0..33 -- 33..end
data[0..33].copy_from_slice(&public_serialized);
index.store(&mut data[33..]);
hmac_pair(&data, private_key, chain_code)
}
hmac_pair(&data, private_key, chain_code)
}
// Deterministic derivation of the key using the secp256k1 elliptic curve.
// This is hardened derivation and does not allow associating the
// corresponding public keys of the original and derived private keys.
fn private_hard<T>(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label {
let mut data: Vec<u8> = vec![0u8; 33 + T::len()];
let private: U256 = private_key.into();
// Deterministic derivation of the key using the secp256k1 elliptic curve.
// This is hardened derivation and does not allow associating the
// corresponding public keys of the original and derived private keys.
fn private_hard<T>(private_key: H256, chain_code: H256, index: T) -> (H256, H256)
where
T: Label,
{
let mut data: Vec<u8> = vec![0u8; 33 + T::len()];
let private: U256 = private_key.into();
// 0x00 (padding) -- private_key -- index
// 0 -- 1..33 -- 33..end
private.to_big_endian(&mut data[1..33]);
index.store(&mut data[33..(33 + T::len())]);
// 0x00 (padding) -- private_key -- index
// 0 -- 1..33 -- 33..end
private.to_big_endian(&mut data[1..33]);
index.store(&mut data[33..(33 + T::len())]);
hmac_pair(&data, private_key, chain_code)
}
hmac_pair(&data, private_key, chain_code)
}
fn private_add(k1: U256, k2: U256) -> U256 {
let sum = U512::from(k1) + U512::from(k2);
modulo(sum, curve_order())
}
fn private_add(k1: U256, k2: U256) -> U256 {
let sum = U512::from(k1) + U512::from(k2);
modulo(sum, curve_order())
}
// todo: surely can be optimized
fn modulo(u1: U512, u2: U256) -> U256 {
let dv = u1 / U512::from(u2);
let md = u1 - (dv * U512::from(u2));
md.into()
}
// todo: surely can be optimized
fn modulo(u1: U512, u2: U256) -> U256 {
let dv = u1 / U512::from(u2);
let md = u1 - (dv * U512::from(u2));
md.into()
}
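// A possible simplification of the todo above (a sketch only, assuming the U512
// type from ethereum_types implements `%` for U512 operands):
//
// fn modulo(u1: U512, u2: U256) -> U256 {
//     (u1 % U512::from(u2)).into()
// }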
pub fn public<T>(public_key: H512, chain_code: H256, derivation: Derivation<T>) -> Result<(H512, H256), Error> where T: Label {
let index = match derivation {
Derivation::Soft(index) => index,
Derivation::Hard(_) => { return Err(Error::InvalidHardenedUse); }
};
pub fn public<T>(
public_key: H512,
chain_code: H256,
derivation: Derivation<T>,
) -> Result<(H512, H256), Error>
where
T: Label,
{
let index = match derivation {
Derivation::Soft(index) => index,
Derivation::Hard(_) => {
return Err(Error::InvalidHardenedUse);
}
};
let mut public_sec_raw = [0u8; 65];
public_sec_raw[0] = 4;
public_sec_raw[1..65].copy_from_slice(&*public_key);
let public_sec = PublicKey::from_slice(&SECP256K1, &public_sec_raw).map_err(|_| Error::InvalidPoint)?;
let public_serialized = public_sec.serialize_vec(&SECP256K1, true);
let mut public_sec_raw = [0u8; 65];
public_sec_raw[0] = 4;
public_sec_raw[1..65].copy_from_slice(&*public_key);
let public_sec =
PublicKey::from_slice(&SECP256K1, &public_sec_raw).map_err(|_| Error::InvalidPoint)?;
let public_serialized = public_sec.serialize_vec(&SECP256K1, true);
let mut data = vec![0u8; 33 + T::len()];
// curve point (compressed public key) -- index
// 0..33 -- 33..end
data[0..33].copy_from_slice(&public_serialized);
index.store(&mut data[33..(33 + T::len())]);
let mut data = vec![0u8; 33 + T::len()];
// curve point (compressed public key) -- index
// 0..33 -- 33..end
data[0..33].copy_from_slice(&public_serialized);
index.store(&mut data[33..(33 + T::len())]);
// HMAC512SHA produces [derived private(256); new chain code(256)]
let skey = hmac::SigKey::sha512(&*chain_code);
let i_512 = hmac::sign(&skey, &data[..]);
// HMAC512SHA produces [derived private(256); new chain code(256)]
let skey = hmac::SigKey::sha512(&*chain_code);
let i_512 = hmac::sign(&skey, &data[..]);
let new_private = H256::from(&i_512[0..32]);
let new_chain_code = H256::from(&i_512[32..64]);
let new_private = H256::from(&i_512[0..32]);
let new_chain_code = H256::from(&i_512[32..64]);
// Generated private key can (extremely rarely) be out of secp256k1 key field
if curve_order() <= new_private.clone().into() { return Err(Error::MissingIndex); }
let new_private_sec = SecretKey::from_slice(&SECP256K1, &*new_private)
// Generated private key can (extremely rarely) be out of secp256k1 key field
if curve_order() <= new_private.clone().into() {
return Err(Error::MissingIndex);
}
let new_private_sec = SecretKey::from_slice(&SECP256K1, &*new_private)
.expect("Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed");
let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec)
.expect("Valid private key produces valid public key");
let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec)
.expect("Valid private key produces valid public key");
// Adding two points on the elliptic curves (combining two public keys)
new_public.add_assign(&SECP256K1, &public_sec)
.expect("Addition of two valid points produce valid point");
// Adding two points on the elliptic curves (combining two public keys)
new_public
.add_assign(&SECP256K1, &public_sec)
.expect("Addition of two valid points produce valid point");
let serialized = new_public.serialize_vec(&SECP256K1, false);
let serialized = new_public.serialize_vec(&SECP256K1, false);
Ok((
H512::from(&serialized[1..65]),
new_chain_code,
))
}
Ok((H512::from(&serialized[1..65]), new_chain_code))
}
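// In other words: child_public = parent_public + G * IL, where IL is the left half
// of the HMAC output and the right half is the child chain code. This is what makes
// soft public derivation line up with soft private derivation (see the
// h256_soft_match test below).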
fn sha3(slc: &[u8]) -> H256 {
keccak::Keccak256::keccak256(slc).into()
}
fn sha3(slc: &[u8]) -> H256 {
keccak::Keccak256::keccak256(slc).into()
}
pub fn chain_code(secret: H256) -> H256 {
// 100,000 rounds of sha3
let mut running_sha3 = sha3(&*secret);
for _ in 0..99999 { running_sha3 = sha3(&*running_sha3); }
running_sha3
}
pub fn chain_code(secret: H256) -> H256 {
// 100,000 rounds of sha3
let mut running_sha3 = sha3(&*secret);
for _ in 0..99999 {
running_sha3 = sha3(&*running_sha3);
}
running_sha3
}
pub fn point(secret: H256) -> Result<H512, Error> {
let sec = SecretKey::from_slice(&SECP256K1, &*secret)
.map_err(|_| Error::InvalidPoint)?;
let public_sec = PublicKey::from_secret_key(&SECP256K1, &sec)
.map_err(|_| Error::InvalidPoint)?;
let serialized = public_sec.serialize_vec(&SECP256K1, false);
Ok(H512::from(&serialized[1..65]))
}
pub fn point(secret: H256) -> Result<H512, Error> {
let sec = SecretKey::from_slice(&SECP256K1, &*secret).map_err(|_| Error::InvalidPoint)?;
let public_sec =
PublicKey::from_secret_key(&SECP256K1, &sec).map_err(|_| Error::InvalidPoint)?;
let serialized = public_sec.serialize_vec(&SECP256K1, false);
Ok(H512::from(&serialized[1..65]))
}
pub fn seed_pair(seed: &[u8]) -> (H256, H256) {
let skey = hmac::SigKey::sha512(b"Bitcoin seed");
let i_512 = hmac::sign(&skey, seed);
pub fn seed_pair(seed: &[u8]) -> (H256, H256) {
let skey = hmac::SigKey::sha512(b"Bitcoin seed");
let i_512 = hmac::sign(&skey, seed);
let master_key = H256::from_slice(&i_512[0..32]);
let chain_code = H256::from_slice(&i_512[32..64]);
let master_key = H256::from_slice(&i_512[0..32]);
let chain_code = H256::from_slice(&i_512[32..64]);
(master_key, chain_code)
}
(master_key, chain_code)
}
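// A usage sketch tying the helpers together (illustrative only; `seed_bytes` stands
// in for a caller-supplied BIP32 seed such as the one used by the tests below, and
// u32 is assumed to implement Label):
//
// let (master_key, master_chain) = seed_pair(&seed_bytes);
// let (child_key, child_chain) = private(master_key, master_chain, Derivation::Soft(1u32));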
}
#[cfg(test)]
mod tests {
use super::{ExtendedSecret, ExtendedPublic, ExtendedKeyPair};
use secret::Secret;
use std::str::FromStr;
use ethereum_types::{H128, H256};
use super::{derivation, Derivation};
use super::{derivation, Derivation, ExtendedKeyPair, ExtendedPublic, ExtendedSecret};
use ethereum_types::{H128, H256};
use secret::Secret;
use std::str::FromStr;
fn master_chain_basic() -> (H256, H256) {
let seed = H128::from_str("000102030405060708090a0b0c0d0e0f")
.expect("Seed should be valid H128")
.to_vec();
fn master_chain_basic() -> (H256, H256) {
let seed = H128::from_str("000102030405060708090a0b0c0d0e0f")
.expect("Seed should be valid H128")
.to_vec();
derivation::seed_pair(&*seed)
}
derivation::seed_pair(&*seed)
}
fn test_extended<F>(f: F, test_private: H256) where F: Fn(ExtendedSecret) -> ExtendedSecret {
let (private_seed, chain_code) = master_chain_basic();
let extended_secret = ExtendedSecret::with_code(Secret::from(private_seed.0), chain_code);
let derived = f(extended_secret);
assert_eq!(**derived.as_raw(), test_private);
}
fn test_extended<F>(f: F, test_private: H256)
where
F: Fn(ExtendedSecret) -> ExtendedSecret,
{
let (private_seed, chain_code) = master_chain_basic();
let extended_secret = ExtendedSecret::with_code(Secret::from(private_seed.0), chain_code);
let derived = f(extended_secret);
assert_eq!(**derived.as_raw(), test_private);
}
#[test]
fn smoky() {
let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap();
let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into());
#[test]
fn smoky() {
let secret =
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65")
.unwrap();
let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into());
// hardened
assert_eq!(&**extended_secret.as_raw(), &*secret);
assert_eq!(&**extended_secret.derive(2147483648.into()).as_raw(), &"0927453daed47839608e414a3738dfad10aed17c459bbd9ab53f89b026c834b6".into());
assert_eq!(&**extended_secret.derive(2147483649.into()).as_raw(), &"44238b6a29c6dcbe9b401364141ba11e2198c289a5fed243a1c11af35c19dc0f".into());
// hardened
assert_eq!(&**extended_secret.as_raw(), &*secret);
assert_eq!(
&**extended_secret.derive(2147483648.into()).as_raw(),
&"0927453daed47839608e414a3738dfad10aed17c459bbd9ab53f89b026c834b6".into()
);
assert_eq!(
&**extended_secret.derive(2147483649.into()).as_raw(),
&"44238b6a29c6dcbe9b401364141ba11e2198c289a5fed243a1c11af35c19dc0f".into()
);
// normal
assert_eq!(&**extended_secret.derive(0.into()).as_raw(), &"bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6".into());
assert_eq!(&**extended_secret.derive(1.into()).as_raw(), &"bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc".into());
assert_eq!(&**extended_secret.derive(2.into()).as_raw(), &"86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268".into());
// normal
assert_eq!(
&**extended_secret.derive(0.into()).as_raw(),
&"bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6".into()
);
assert_eq!(
&**extended_secret.derive(1.into()).as_raw(),
&"bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc".into()
);
assert_eq!(
&**extended_secret.derive(2.into()).as_raw(),
&"86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268".into()
);
let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created");
let derived_public = extended_public.derive(0.into()).expect("First derivation of public should succeed");
assert_eq!(&*derived_public.public(), &"f7b3244c96688f92372bfd4def26dc4151529747bab9f188a4ad34e141d47bd66522ff048bc6f19a0a4429b04318b1a8796c000265b4fa200dae5f6dda92dd94".into());
let extended_public = ExtendedPublic::from_secret(&extended_secret)
.expect("Extended public should be created");
let derived_public = extended_public
.derive(0.into())
.expect("First derivation of public should succeed");
assert_eq!(&*derived_public.public(), &"f7b3244c96688f92372bfd4def26dc4151529747bab9f188a4ad34e141d47bd66522ff048bc6f19a0a4429b04318b1a8796c000265b4fa200dae5f6dda92dd94".into());
let keypair = ExtendedKeyPair::with_secret(
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(),
064.into(),
);
assert_eq!(&**keypair.derive(2147483648u32.into()).expect("Derivation of keypair should succeed").secret().as_raw(), &"edef54414c03196557cf73774bc97a645c9a1df2164ed34f0c2a78d1375a930c".into());
}
let keypair = ExtendedKeyPair::with_secret(
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65")
.unwrap(),
064.into(),
);
assert_eq!(
&**keypair
.derive(2147483648u32.into())
.expect("Derivation of keypair should succeed")
.secret()
.as_raw(),
&"edef54414c03196557cf73774bc97a645c9a1df2164ed34f0c2a78d1375a930c".into()
);
}
#[test]
fn h256_soft_match() {
let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap();
let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap();
#[test]
fn h256_soft_match() {
let secret =
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65")
.unwrap();
let derivation_secret =
H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015")
.unwrap();
let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into());
let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created");
let extended_secret = ExtendedSecret::with_code(secret.clone(), 0u64.into());
let extended_public = ExtendedPublic::from_secret(&extended_secret)
.expect("Extended public should be created");
let derived_secret0 = extended_secret.derive(Derivation::Soft(derivation_secret));
let derived_public0 = extended_public.derive(Derivation::Soft(derivation_secret)).expect("First derivation of public should succeed");
let derived_secret0 = extended_secret.derive(Derivation::Soft(derivation_secret));
let derived_public0 = extended_public
.derive(Derivation::Soft(derivation_secret))
.expect("First derivation of public should succeed");
let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created");
let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0)
.expect("Extended public should be created");
assert_eq!(public_from_secret0.public(), derived_public0.public());
}
assert_eq!(public_from_secret0.public(), derived_public0.public());
}
#[test]
fn h256_hard() {
let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap();
let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap();
let extended_secret = ExtendedSecret::with_code(secret.clone(), 1u64.into());
#[test]
fn h256_hard() {
let secret =
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65")
.unwrap();
let derivation_secret =
H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015")
.unwrap();
let extended_secret = ExtendedSecret::with_code(secret.clone(), 1u64.into());
assert_eq!(&**extended_secret.derive(Derivation::Hard(derivation_secret)).as_raw(), &"2bc2d696fb744d77ff813b4a1ef0ad64e1e5188b622c54ba917acc5ebc7c5486".into());
}
assert_eq!(
&**extended_secret
.derive(Derivation::Hard(derivation_secret))
.as_raw(),
&"2bc2d696fb744d77ff813b4a1ef0ad64e1e5188b622c54ba917acc5ebc7c5486".into()
);
}
#[test]
fn match_() {
let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap();
let extended_secret = ExtendedSecret::with_code(secret.clone(), 1.into());
let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created");
#[test]
fn match_() {
let secret =
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65")
.unwrap();
let extended_secret = ExtendedSecret::with_code(secret.clone(), 1.into());
let extended_public = ExtendedPublic::from_secret(&extended_secret)
.expect("Extended public should be created");
let derived_secret0 = extended_secret.derive(0.into());
let derived_public0 = extended_public.derive(0.into()).expect("First derivation of public should succeed");
let derived_secret0 = extended_secret.derive(0.into());
let derived_public0 = extended_public
.derive(0.into())
.expect("First derivation of public should succeed");
let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created");
let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0)
.expect("Extended public should be created");
assert_eq!(public_from_secret0.public(), derived_public0.public());
}
assert_eq!(public_from_secret0.public(), derived_public0.public());
}
#[test]
fn test_seeds() {
let seed = H128::from_str("000102030405060708090a0b0c0d0e0f")
.expect("Seed should be valid H128")
.to_vec();
#[test]
fn test_seeds() {
let seed = H128::from_str("000102030405060708090a0b0c0d0e0f")
.expect("Seed should be valid H128")
.to_vec();
// private key from bitcoin test vector
// xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs
let test_private = H256::from_str("e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35")
.expect("Private should be decoded ok");
// private key from bitcoin test vector
// xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs
let test_private =
H256::from_str("e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35")
.expect("Private should be decoded ok");
let (private_seed, _) = derivation::seed_pair(&*seed);
let (private_seed, _) = derivation::seed_pair(&*seed);
assert_eq!(private_seed, test_private);
}
assert_eq!(private_seed, test_private);
}
#[test]
fn test_vector_1() {
// xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7
// H(0)
test_extended(
|secret| secret.derive(2147483648.into()),
H256::from_str("edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea")
.expect("Private should be decoded ok")
);
}
#[test]
fn test_vector_1() {
// xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7
// H(0)
test_extended(
|secret| secret.derive(2147483648.into()),
H256::from_str("edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea")
.expect("Private should be decoded ok"),
);
}
#[test]
fn test_vector_2() {
// xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs
// H(0)/1
test_extended(
|secret| secret.derive(2147483648.into()).derive(1.into()),
H256::from_str("3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368")
.expect("Private should be decoded ok")
);
}
#[test]
fn test_vector_2() {
// xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs
// H(0)/1
test_extended(
|secret| secret.derive(2147483648.into()).derive(1.into()),
H256::from_str("3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368")
.expect("Private should be decoded ok"),
);
}
}

View File

@@ -17,15 +17,17 @@
use tiny_keccak::Keccak;
pub trait Keccak256<T> {
fn keccak256(&self) -> T where T: Sized;
fn keccak256(&self) -> T
where
T: Sized;
}
impl Keccak256<[u8; 32]> for [u8] {
fn keccak256(&self) -> [u8; 32] {
let mut keccak = Keccak::new_keccak256();
let mut result = [0u8; 32];
keccak.update(self);
keccak.finalize(&mut result);
result
}
fn keccak256(&self) -> [u8; 32] {
let mut keccak = Keccak::new_keccak256();
let mut result = [0u8; 32];
keccak.update(self);
keccak.finalize(&mut result);
result
}
}

View File

@@ -14,102 +14,107 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use secp256k1::key;
use rustc_hex::ToHex;
use super::{Address, Error, Public, Secret, SECP256K1};
use keccak::Keccak256;
use super::{Secret, Public, Address, SECP256K1, Error};
use rustc_hex::ToHex;
use secp256k1::key;
use std::fmt;
pub fn public_to_address(public: &Public) -> Address {
let hash = public.keccak256();
let mut result = Address::default();
result.copy_from_slice(&hash[12..]);
result
let hash = public.keccak256();
let mut result = Address::default();
result.copy_from_slice(&hash[12..]);
result
}
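// Equivalently: the address is the last 20 bytes of keccak256(public); the first
// 12 bytes of the 32-byte hash are discarded.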
#[derive(Debug, Clone, PartialEq)]
/// secp256k1 key pair
pub struct KeyPair {
secret: Secret,
public: Public,
secret: Secret,
public: Public,
}
impl fmt::Display for KeyPair {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
writeln!(f, "secret: {}", self.secret.to_hex())?;
writeln!(f, "public: {}", self.public.to_hex())?;
write!(f, "address: {}", self.address().to_hex())
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
writeln!(f, "secret: {}", self.secret.to_hex())?;
writeln!(f, "public: {}", self.public.to_hex())?;
write!(f, "address: {}", self.address().to_hex())
}
}
impl KeyPair {
/// Create a pair from secret key
pub fn from_secret(secret: Secret) -> Result<KeyPair, Error> {
let context = &SECP256K1;
let s: key::SecretKey = key::SecretKey::from_slice(context, &secret[..])?;
let pub_key = key::PublicKey::from_secret_key(context, &s)?;
let serialized = pub_key.serialize_vec(context, false);
/// Create a pair from secret key
pub fn from_secret(secret: Secret) -> Result<KeyPair, Error> {
let context = &SECP256K1;
let s: key::SecretKey = key::SecretKey::from_slice(context, &secret[..])?;
let pub_key = key::PublicKey::from_secret_key(context, &s)?;
let serialized = pub_key.serialize_vec(context, false);
let mut public = Public::default();
public.copy_from_slice(&serialized[1..65]);
let mut public = Public::default();
public.copy_from_slice(&serialized[1..65]);
let keypair = KeyPair {
secret: secret,
public: public,
};
let keypair = KeyPair {
secret: secret,
public: public,
};
Ok(keypair)
}
Ok(keypair)
}
pub fn from_secret_slice(slice: &[u8]) -> Result<KeyPair, Error> {
Self::from_secret(Secret::from_unsafe_slice(slice)?)
}
pub fn from_secret_slice(slice: &[u8]) -> Result<KeyPair, Error> {
Self::from_secret(Secret::from_unsafe_slice(slice)?)
}
pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self {
let context = &SECP256K1;
let serialized = publ.serialize_vec(context, false);
let secret = Secret::from(sec);
let mut public = Public::default();
public.copy_from_slice(&serialized[1..65]);
pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self {
let context = &SECP256K1;
let serialized = publ.serialize_vec(context, false);
let secret = Secret::from(sec);
let mut public = Public::default();
public.copy_from_slice(&serialized[1..65]);
KeyPair {
secret: secret,
public: public,
}
}
KeyPair {
secret: secret,
public: public,
}
}
pub fn secret(&self) -> &Secret {
&self.secret
}
pub fn secret(&self) -> &Secret {
&self.secret
}
pub fn public(&self) -> &Public {
&self.public
}
pub fn public(&self) -> &Public {
&self.public
}
pub fn address(&self) -> Address {
public_to_address(&self.public)
}
pub fn address(&self) -> Address {
public_to_address(&self.public)
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use {KeyPair, Secret};
use std::str::FromStr;
use KeyPair;
use Secret;
#[test]
fn from_secret() {
let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap();
let _ = KeyPair::from_secret(secret).unwrap();
}
#[test]
fn from_secret() {
let secret =
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65")
.unwrap();
let _ = KeyPair::from_secret(secret).unwrap();
}
#[test]
fn keypair_display() {
let expected =
#[test]
fn keypair_display() {
let expected =
"secret: a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65
public: 8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4
address: 5b073e9233944b5e729e46d618f0d8edf3d9c34a".to_owned();
let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap();
let kp = KeyPair::from_secret(secret).unwrap();
assert_eq!(format!("{}", kp), expected);
}
let secret =
Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65")
.unwrap();
let kp = KeyPair::from_secret(secret).unwrap();
assert_eq!(format!("{}", kp), expected);
}
}

View File

@@ -17,9 +17,9 @@
// #![warn(missing_docs)]
extern crate edit_distance;
extern crate parity_crypto;
extern crate ethereum_types;
extern crate memzero;
extern crate parity_crypto;
extern crate parity_wordlist;
#[macro_use]
extern crate quick_error;
@@ -39,31 +39,33 @@ extern crate serde_derive;
mod brain;
mod brain_prefix;
mod error;
mod keypair;
mod extended;
mod keccak;
mod keypair;
mod password;
mod prefix;
mod random;
mod signature;
mod secret;
mod extended;
mod signature;
pub mod brain_recover;
pub mod crypto;
pub mod math;
pub use self::parity_wordlist::Error as WordlistError;
pub use self::brain::Brain;
pub use self::brain_prefix::BrainPrefix;
pub use self::error::Error;
pub use self::keypair::{KeyPair, public_to_address};
pub use self::math::public_is_valid;
pub use self::password::Password;
pub use self::prefix::Prefix;
pub use self::random::Random;
pub use self::signature::{sign, verify_public, verify_address, recover, Signature};
pub use self::secret::Secret;
pub use self::extended::{ExtendedPublic, ExtendedSecret, ExtendedKeyPair, DerivationError, Derivation};
pub use self::{
brain::Brain,
brain_prefix::BrainPrefix,
error::Error,
extended::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret},
keypair::{public_to_address, KeyPair},
math::public_is_valid,
parity_wordlist::Error as WordlistError,
password::Password,
prefix::Prefix,
random::Random,
secret::Secret,
signature::{recover, sign, verify_address, verify_public, Signature},
};
use ethereum_types::H256;
@@ -71,7 +73,7 @@ pub use ethereum_types::{Address, Public};
pub type Message = H256;
lazy_static! {
pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new();
pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new();
}
/// Uninstantiatable error type for infallible generators.
@@ -80,8 +82,8 @@ pub enum Void {}
/// Generates new keypair.
pub trait Generator {
type Error;
type Error;
/// Should be called to generate new keypair.
fn generate(&mut self) -> Result<KeyPair, Self::Error>;
/// Should be called to generate new keypair.
fn generate(&mut self) -> Result<KeyPair, Self::Error>;
}

View File

@@ -14,116 +14,121 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use super::{SECP256K1, Public, Secret, Error};
use secp256k1::key;
use secp256k1::constants::{GENERATOR_X, GENERATOR_Y, CURVE_ORDER};
use ethereum_types::{U256, H256};
use super::{Error, Public, Secret, SECP256K1};
use ethereum_types::{H256, U256};
use secp256k1::{
constants::{CURVE_ORDER, GENERATOR_X, GENERATOR_Y},
key,
};
/// Whether the public key is valid.
pub fn public_is_valid(public: &Public) -> bool {
to_secp256k1_public(public).ok()
.map_or(false, |p| p.is_valid())
to_secp256k1_public(public)
.ok()
.map_or(false, |p| p.is_valid())
}
/// Inplace multiply public key by secret key (EC point * scalar)
pub fn public_mul_secret(public: &mut Public, secret: &Secret) -> Result<(), Error> {
let key_secret = secret.to_secp256k1_secret()?;
let mut key_public = to_secp256k1_public(public)?;
key_public.mul_assign(&SECP256K1, &key_secret)?;
set_public(public, &key_public);
Ok(())
let key_secret = secret.to_secp256k1_secret()?;
let mut key_public = to_secp256k1_public(public)?;
key_public.mul_assign(&SECP256K1, &key_secret)?;
set_public(public, &key_public);
Ok(())
}
/// Inplace add one public key to another (EC point + EC point)
pub fn public_add(public: &mut Public, other: &Public) -> Result<(), Error> {
let mut key_public = to_secp256k1_public(public)?;
let other_public = to_secp256k1_public(other)?;
key_public.add_assign(&SECP256K1, &other_public)?;
set_public(public, &key_public);
Ok(())
let mut key_public = to_secp256k1_public(public)?;
let other_public = to_secp256k1_public(other)?;
key_public.add_assign(&SECP256K1, &other_public)?;
set_public(public, &key_public);
Ok(())
}
/// Inplace sub one public key from another (EC point - EC point)
pub fn public_sub(public: &mut Public, other: &Public) -> Result<(), Error> {
let mut key_neg_other = to_secp256k1_public(other)?;
key_neg_other.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
let mut key_neg_other = to_secp256k1_public(other)?;
key_neg_other.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
let mut key_public = to_secp256k1_public(public)?;
key_public.add_assign(&SECP256K1, &key_neg_other)?;
set_public(public, &key_public);
Ok(())
let mut key_public = to_secp256k1_public(public)?;
key_public.add_assign(&SECP256K1, &key_neg_other)?;
set_public(public, &key_public);
Ok(())
}
/// Replace public key with its negation (EC point = - EC point)
pub fn public_negate(public: &mut Public) -> Result<(), Error> {
let mut key_public = to_secp256k1_public(public)?;
key_public.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
set_public(public, &key_public);
Ok(())
let mut key_public = to_secp256k1_public(public)?;
key_public.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
set_public(public, &key_public);
Ok(())
}
/// Return base point of secp256k1
pub fn generation_point() -> Public {
let mut public_sec_raw = [0u8; 65];
public_sec_raw[0] = 4;
public_sec_raw[1..33].copy_from_slice(&GENERATOR_X);
public_sec_raw[33..65].copy_from_slice(&GENERATOR_Y);
let mut public_sec_raw = [0u8; 65];
public_sec_raw[0] = 4;
public_sec_raw[1..33].copy_from_slice(&GENERATOR_X);
public_sec_raw[33..65].copy_from_slice(&GENERATOR_Y);
let public_key = key::PublicKey::from_slice(&SECP256K1, &public_sec_raw)
.expect("constructing using predefined constants; qed");
let mut public = Public::default();
set_public(&mut public, &public_key);
public
let public_key = key::PublicKey::from_slice(&SECP256K1, &public_sec_raw)
.expect("constructing using predefined constants; qed");
let mut public = Public::default();
set_public(&mut public, &public_key);
public
}
/// Return secp256k1 elliptic curve order
pub fn curve_order() -> U256 {
H256::from_slice(&CURVE_ORDER).into()
H256::from_slice(&CURVE_ORDER).into()
}
fn to_secp256k1_public(public: &Public) -> Result<key::PublicKey, Error> {
let public_data = {
let mut temp = [4u8; 65];
(&mut temp[1..65]).copy_from_slice(&public[0..64]);
temp
};
let public_data = {
let mut temp = [4u8; 65];
(&mut temp[1..65]).copy_from_slice(&public[0..64]);
temp
};
Ok(key::PublicKey::from_slice(&SECP256K1, &public_data)?)
Ok(key::PublicKey::from_slice(&SECP256K1, &public_data)?)
}
fn set_public(public: &mut Public, key_public: &key::PublicKey) {
let key_public_serialized = key_public.serialize_vec(&SECP256K1, false);
public.copy_from_slice(&key_public_serialized[1..65]);
let key_public_serialized = key_public.serialize_vec(&SECP256K1, false);
public.copy_from_slice(&key_public_serialized[1..65]);
}
#[cfg(test)]
mod tests {
use super::super::{Random, Generator};
use super::{public_add, public_sub};
use super::{
super::{Generator, Random},
public_add, public_sub,
};
#[test]
fn public_addition_is_commutative() {
let public1 = Random.generate().unwrap().public().clone();
let public2 = Random.generate().unwrap().public().clone();
#[test]
fn public_addition_is_commutative() {
let public1 = Random.generate().unwrap().public().clone();
let public2 = Random.generate().unwrap().public().clone();
let mut left = public1.clone();
public_add(&mut left, &public2).unwrap();
let mut left = public1.clone();
public_add(&mut left, &public2).unwrap();
let mut right = public2.clone();
public_add(&mut right, &public1).unwrap();
let mut right = public2.clone();
public_add(&mut right, &public1).unwrap();
assert_eq!(left, right);
}
assert_eq!(left, right);
}
#[test]
fn public_addition_is_reversible_with_subtraction() {
let public1 = Random.generate().unwrap().public().clone();
let public2 = Random.generate().unwrap().public().clone();
#[test]
fn public_addition_is_reversible_with_subtraction() {
let public1 = Random.generate().unwrap().public().clone();
let public2 = Random.generate().unwrap().public().clone();
let mut sum = public1.clone();
public_add(&mut sum, &public2).unwrap();
public_sub(&mut sum, &public2).unwrap();
let mut sum = public1.clone();
public_add(&mut sum, &public2).unwrap();
public_sub(&mut sum, &public2).unwrap();
assert_eq!(sum, public1);
}
assert_eq!(sum, public1);
}
}

View File

@@ -20,40 +20,40 @@ use std::{fmt, ptr};
pub struct Password(String);
impl fmt::Debug for Password {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Password(******)")
}
}
impl Password {
pub fn as_bytes(&self) -> &[u8] {
self.0.as_bytes()
}
pub fn as_bytes(&self) -> &[u8] {
self.0.as_bytes()
}
pub fn as_str(&self) -> &str {
self.0.as_str()
}
pub fn as_str(&self) -> &str {
self.0.as_str()
}
}
// Custom drop impl to zero out memory.
impl Drop for Password {
fn drop(&mut self) {
unsafe {
for byte_ref in self.0.as_mut_vec() {
ptr::write_volatile(byte_ref, 0)
}
}
}
fn drop(&mut self) {
unsafe {
for byte_ref in self.0.as_mut_vec() {
ptr::write_volatile(byte_ref, 0)
}
}
}
}
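// The volatile writes keep the compiler from optimizing the zeroing away, so the
// plaintext password does not linger in memory once the Password is dropped.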
impl From<String> for Password {
fn from(s: String) -> Password {
Password(s)
}
fn from(s: String) -> Password {
Password(s)
}
}
impl<'a> From<&'a str> for Password {
fn from(s: &'a str) -> Password {
Password::from(String::from(s))
}
fn from(s: &'a str) -> Password {
Password::from(String::from(s))
}
}

View File

@@ -14,46 +14,49 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use super::{Random, Generator, KeyPair, Error};
use super::{Error, Generator, KeyPair, Random};
/// Tries to find keypair with address starting with given prefix.
pub struct Prefix {
prefix: Vec<u8>,
iterations: usize,
prefix: Vec<u8>,
iterations: usize,
}
impl Prefix {
pub fn new(prefix: Vec<u8>, iterations: usize) -> Self {
Prefix {
prefix: prefix,
iterations: iterations,
}
}
pub fn new(prefix: Vec<u8>, iterations: usize) -> Self {
Prefix {
prefix: prefix,
iterations: iterations,
}
}
}
impl Generator for Prefix {
type Error = Error;
type Error = Error;
fn generate(&mut self) -> Result<KeyPair, Error> {
for _ in 0..self.iterations {
let keypair = Random.generate()?;
if keypair.address().starts_with(&self.prefix) {
return Ok(keypair)
}
}
fn generate(&mut self) -> Result<KeyPair, Error> {
for _ in 0..self.iterations {
let keypair = Random.generate()?;
if keypair.address().starts_with(&self.prefix) {
return Ok(keypair);
}
}
Err(Error::Custom("Could not find keypair".into()))
}
Err(Error::Custom("Could not find keypair".into()))
}
}
#[cfg(test)]
mod tests {
use {Generator, Prefix};
use Generator;
use Prefix;
#[test]
fn prefix_generator() {
let prefix = vec![0xffu8];
let keypair = Prefix::new(prefix.clone(), usize::max_value()).generate().unwrap();
assert!(keypair.address().starts_with(&prefix));
}
#[test]
fn prefix_generator() {
let prefix = vec![0xffu8];
let keypair = Prefix::new(prefix.clone(), usize::max_value())
.generate()
.unwrap();
assert!(keypair.address().starts_with(&prefix));
}
}

View File

@@ -14,31 +14,32 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use rand::os::OsRng;
use super::{Generator, KeyPair, SECP256K1};
use rand::os::OsRng;
/// Randomly generates new keypair, instantiating the RNG each time.
pub struct Random;
impl Generator for Random {
type Error = ::std::io::Error;
type Error = ::std::io::Error;
fn generate(&mut self) -> Result<KeyPair, Self::Error> {
let mut rng = OsRng::new()?;
match rng.generate() {
Ok(pair) => Ok(pair),
Err(void) => match void {}, // LLVM unreachable
}
}
fn generate(&mut self) -> Result<KeyPair, Self::Error> {
let mut rng = OsRng::new()?;
match rng.generate() {
Ok(pair) => Ok(pair),
Err(void) => match void {}, // LLVM unreachable
}
}
}
impl Generator for OsRng {
type Error = ::Void;
type Error = ::Void;
fn generate(&mut self) -> Result<KeyPair, Self::Error> {
let (sec, publ) = SECP256K1.generate_keypair(self)
.expect("context always created with full capabilities; qed");
fn generate(&mut self) -> Result<KeyPair, Self::Error> {
let (sec, publ) = SECP256K1
.generate_keypair(self)
.expect("context always created with full capabilities; qed");
Ok(KeyPair::from_keypair(sec, publ))
}
Ok(KeyPair::from_keypair(sec, publ))
}
}

View File

@@ -14,285 +14,309 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use std::ops::Deref;
use std::str::FromStr;
use rustc_hex::ToHex;
use secp256k1::constants::{SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE};
use secp256k1::key;
use ethereum_types::H256;
use memzero::Memzero;
use {Error, SECP256K1};
use rustc_hex::ToHex;
use secp256k1::{constants::SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE, key};
use std::{fmt, ops::Deref, str::FromStr};
use Error;
use SECP256K1;
#[derive(Clone, PartialEq, Eq)]
pub struct Secret {
inner: Memzero<H256>,
inner: Memzero<H256>,
}
impl ToHex for Secret {
fn to_hex(&self) -> String {
format!("{:x}", *self.inner)
}
fn to_hex(&self) -> String {
format!("{:x}", *self.inner)
}
}
impl fmt::LowerHex for Secret {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt(fmt)
}
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt(fmt)
}
}
impl fmt::Debug for Secret {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt(fmt)
}
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt(fmt)
}
}
impl fmt::Display for Secret {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "Secret: 0x{:x}{:x}..{:x}{:x}", self.inner[0], self.inner[1], self.inner[30], self.inner[31])
}
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(
fmt,
"Secret: 0x{:x}{:x}..{:x}{:x}",
self.inner[0], self.inner[1], self.inner[30], self.inner[31]
)
}
}
impl Secret {
/// Creates a `Secret` from the given slice, returning `None` if the slice length != 32.
pub fn from_slice(key: &[u8]) -> Option<Self> {
if key.len() != 32 {
return None
}
let mut h = H256::default();
h.copy_from_slice(&key[0..32]);
Some(Secret { inner: Memzero::from(h) })
}
/// Creates a `Secret` from the given slice, returning `None` if the slice length != 32.
pub fn from_slice(key: &[u8]) -> Option<Self> {
if key.len() != 32 {
return None;
}
let mut h = H256::default();
h.copy_from_slice(&key[0..32]);
Some(Secret {
inner: Memzero::from(h),
})
}
/// Creates zero key, which is invalid for crypto operations, but valid for math operations.
pub fn zero() -> Self {
Secret { inner: Memzero::from(H256::default()) }
}
/// Creates zero key, which is invalid for crypto operations, but valid for math operations.
pub fn zero() -> Self {
Secret {
inner: Memzero::from(H256::default()),
}
}
/// Imports and validates the key.
pub fn from_unsafe_slice(key: &[u8]) -> Result<Self, Error> {
let secret = key::SecretKey::from_slice(&super::SECP256K1, key)?;
Ok(secret.into())
}
/// Imports and validates the key.
pub fn from_unsafe_slice(key: &[u8]) -> Result<Self, Error> {
let secret = key::SecretKey::from_slice(&super::SECP256K1, key)?;
Ok(secret.into())
}
/// Checks validity of this key.
pub fn check_validity(&self) -> Result<(), Error> {
self.to_secp256k1_secret().map(|_| ())
}
/// Checks validity of this key.
pub fn check_validity(&self) -> Result<(), Error> {
self.to_secp256k1_secret().map(|_| ())
}
/// Inplace add one secret key to another (scalar + scalar)
pub fn add(&mut self, other: &Secret) -> Result<(), Error> {
match (self.is_zero(), other.is_zero()) {
(true, true) | (false, true) => Ok(()),
(true, false) => {
*self = other.clone();
Ok(())
},
(false, false) => {
let mut key_secret = self.to_secp256k1_secret()?;
let other_secret = other.to_secp256k1_secret()?;
key_secret.add_assign(&SECP256K1, &other_secret)?;
/// Inplace add one secret key to another (scalar + scalar)
pub fn add(&mut self, other: &Secret) -> Result<(), Error> {
match (self.is_zero(), other.is_zero()) {
(true, true) | (false, true) => Ok(()),
(true, false) => {
*self = other.clone();
Ok(())
}
(false, false) => {
let mut key_secret = self.to_secp256k1_secret()?;
let other_secret = other.to_secp256k1_secret()?;
key_secret.add_assign(&SECP256K1, &other_secret)?;
*self = key_secret.into();
Ok(())
},
}
}
*self = key_secret.into();
Ok(())
}
}
}
/// Inplace subtract one secret key from another (scalar - scalar)
pub fn sub(&mut self, other: &Secret) -> Result<(), Error> {
match (self.is_zero(), other.is_zero()) {
(true, true) | (false, true) => Ok(()),
(true, false) => {
*self = other.clone();
self.neg()
},
(false, false) => {
let mut key_secret = self.to_secp256k1_secret()?;
let mut other_secret = other.to_secp256k1_secret()?;
other_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
key_secret.add_assign(&SECP256K1, &other_secret)?;
/// Inplace subtract one secret key from another (scalar - scalar)
pub fn sub(&mut self, other: &Secret) -> Result<(), Error> {
match (self.is_zero(), other.is_zero()) {
(true, true) | (false, true) => Ok(()),
(true, false) => {
*self = other.clone();
self.neg()
}
(false, false) => {
let mut key_secret = self.to_secp256k1_secret()?;
let mut other_secret = other.to_secp256k1_secret()?;
other_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
key_secret.add_assign(&SECP256K1, &other_secret)?;
*self = key_secret.into();
Ok(())
},
}
}
*self = key_secret.into();
Ok(())
}
}
}
/// Inplace decrease secret key (scalar - 1)
pub fn dec(&mut self) -> Result<(), Error> {
match self.is_zero() {
true => {
*self = key::MINUS_ONE_KEY.into();
Ok(())
},
false => {
let mut key_secret = self.to_secp256k1_secret()?;
key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
/// Inplace decrease secret key (scalar - 1)
pub fn dec(&mut self) -> Result<(), Error> {
match self.is_zero() {
true => {
*self = key::MINUS_ONE_KEY.into();
Ok(())
}
false => {
let mut key_secret = self.to_secp256k1_secret()?;
key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
*self = key_secret.into();
Ok(())
},
}
}
*self = key_secret.into();
Ok(())
}
}
}
/// Inplace multiply one secret key to another (scalar * scalar)
pub fn mul(&mut self, other: &Secret) -> Result<(), Error> {
match (self.is_zero(), other.is_zero()) {
(true, true) | (true, false) => Ok(()),
(false, true) => {
*self = Self::zero();
Ok(())
},
(false, false) => {
let mut key_secret = self.to_secp256k1_secret()?;
let other_secret = other.to_secp256k1_secret()?;
key_secret.mul_assign(&SECP256K1, &other_secret)?;
/// Inplace multiply one secret key to another (scalar * scalar)
pub fn mul(&mut self, other: &Secret) -> Result<(), Error> {
match (self.is_zero(), other.is_zero()) {
(true, true) | (true, false) => Ok(()),
(false, true) => {
*self = Self::zero();
Ok(())
}
(false, false) => {
let mut key_secret = self.to_secp256k1_secret()?;
let other_secret = other.to_secp256k1_secret()?;
key_secret.mul_assign(&SECP256K1, &other_secret)?;
*self = key_secret.into();
Ok(())
},
}
}
*self = key_secret.into();
Ok(())
}
}
}
/// Inplace negate secret key (-scalar)
pub fn neg(&mut self) -> Result<(), Error> {
match self.is_zero() {
true => Ok(()),
false => {
let mut key_secret = self.to_secp256k1_secret()?;
key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
/// Inplace negate secret key (-scalar)
pub fn neg(&mut self) -> Result<(), Error> {
match self.is_zero() {
true => Ok(()),
false => {
let mut key_secret = self.to_secp256k1_secret()?;
key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
*self = key_secret.into();
Ok(())
},
}
}
*self = key_secret.into();
Ok(())
}
}
}
/// Inplace inverse secret key (1 / scalar)
pub fn inv(&mut self) -> Result<(), Error> {
let mut key_secret = self.to_secp256k1_secret()?;
key_secret.inv_assign(&SECP256K1)?;
/// Inplace inverse secret key (1 / scalar)
pub fn inv(&mut self) -> Result<(), Error> {
let mut key_secret = self.to_secp256k1_secret()?;
key_secret.inv_assign(&SECP256K1)?;
*self = key_secret.into();
Ok(())
}
*self = key_secret.into();
Ok(())
}
/// Compute power of secret key inplace (secret ^ pow).
/// This function is not intended to be used with large powers.
pub fn pow(&mut self, pow: usize) -> Result<(), Error> {
if self.is_zero() {
return Ok(());
}
/// Compute power of secret key inplace (secret ^ pow).
/// This function is not intended to be used with large powers.
pub fn pow(&mut self, pow: usize) -> Result<(), Error> {
if self.is_zero() {
return Ok(());
}
match pow {
0 => *self = key::ONE_KEY.into(),
1 => (),
_ => {
let c = self.clone();
for _ in 1..pow {
self.mul(&c)?;
}
},
}
match pow {
0 => *self = key::ONE_KEY.into(),
1 => (),
_ => {
let c = self.clone();
for _ in 1..pow {
self.mul(&c)?;
}
}
}
Ok(())
}
Ok(())
}
/// Create `secp256k1::key::SecretKey` based on this secret
pub fn to_secp256k1_secret(&self) -> Result<key::SecretKey, Error> {
Ok(key::SecretKey::from_slice(&SECP256K1, &self[..])?)
}
/// Create `secp256k1::key::SecretKey` based on this secret
pub fn to_secp256k1_secret(&self) -> Result<key::SecretKey, Error> {
Ok(key::SecretKey::from_slice(&SECP256K1, &self[..])?)
}
}
impl FromStr for Secret {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(H256::from_str(s).map_err(|e| Error::Custom(format!("{:?}", e)))?.into())
}
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(H256::from_str(s)
.map_err(|e| Error::Custom(format!("{:?}", e)))?
.into())
}
}
impl From<[u8; 32]> for Secret {
fn from(k: [u8; 32]) -> Self {
Secret { inner: Memzero::from(H256(k)) }
}
fn from(k: [u8; 32]) -> Self {
Secret {
inner: Memzero::from(H256(k)),
}
}
}
impl From<H256> for Secret {
fn from(s: H256) -> Self {
s.0.into()
}
fn from(s: H256) -> Self {
s.0.into()
}
}
impl From<&'static str> for Secret {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s))
}
fn from(s: &'static str) -> Self {
s.parse().expect(&format!(
"invalid string literal for {}: '{}'",
stringify!(Self),
s
))
}
}
impl From<key::SecretKey> for Secret {
fn from(key: key::SecretKey) -> Self {
let mut a = [0; SECP256K1_SECRET_KEY_SIZE];
a.copy_from_slice(&key[0 .. SECP256K1_SECRET_KEY_SIZE]);
a.into()
}
fn from(key: key::SecretKey) -> Self {
let mut a = [0; SECP256K1_SECRET_KEY_SIZE];
a.copy_from_slice(&key[0..SECP256K1_SECRET_KEY_SIZE]);
a.into()
}
}
impl Deref for Secret {
type Target = H256;
type Target = H256;
fn deref(&self) -> &Self::Target {
&self.inner
}
fn deref(&self) -> &Self::Target {
&self.inner
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use super::super::{Random, Generator};
use super::Secret;
use super::{
super::{Generator, Random},
Secret,
};
use std::str::FromStr;
#[test]
fn multiplicating_secret_inversion_with_secret_gives_one() {
let secret = Random.generate().unwrap().secret().clone();
let mut inversion = secret.clone();
inversion.inv().unwrap();
inversion.mul(&secret).unwrap();
assert_eq!(inversion, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap());
}
#[test]
fn multiplicating_secret_inversion_with_secret_gives_one() {
let secret = Random.generate().unwrap().secret().clone();
let mut inversion = secret.clone();
inversion.inv().unwrap();
inversion.mul(&secret).unwrap();
assert_eq!(
inversion,
Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001")
.unwrap()
);
}
#[test]
fn secret_inversion_is_reversible_with_inversion() {
let secret = Random.generate().unwrap().secret().clone();
let mut inversion = secret.clone();
inversion.inv().unwrap();
inversion.inv().unwrap();
assert_eq!(inversion, secret);
}
#[test]
fn secret_inversion_is_reversible_with_inversion() {
let secret = Random.generate().unwrap().secret().clone();
let mut inversion = secret.clone();
inversion.inv().unwrap();
inversion.inv().unwrap();
assert_eq!(inversion, secret);
}
#[test]
fn secret_pow() {
let secret = Random.generate().unwrap().secret().clone();
#[test]
fn secret_pow() {
let secret = Random.generate().unwrap().secret().clone();
let mut pow0 = secret.clone();
pow0.pow(0).unwrap();
assert_eq!(pow0, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap());
let mut pow0 = secret.clone();
pow0.pow(0).unwrap();
assert_eq!(
pow0,
Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001")
.unwrap()
);
let mut pow1 = secret.clone();
pow1.pow(1).unwrap();
assert_eq!(pow1, secret);
let mut pow1 = secret.clone();
pow1.pow(1).unwrap();
assert_eq!(pow1, secret);
let mut pow2 = secret.clone();
pow2.pow(2).unwrap();
let mut pow2_expected = secret.clone();
pow2_expected.mul(&secret).unwrap();
assert_eq!(pow2, pow2_expected);
let mut pow2 = secret.clone();
pow2.pow(2).unwrap();
let mut pow2_expected = secret.clone();
pow2_expected.mul(&secret).unwrap();
assert_eq!(pow2, pow2_expected);
let mut pow3 = secret.clone();
pow3.pow(3).unwrap();
let mut pow3_expected = secret.clone();
pow3_expected.mul(&secret).unwrap();
pow3_expected.mul(&secret).unwrap();
assert_eq!(pow3, pow3_expected);
}
let mut pow3 = secret.clone();
pow3.pow(3).unwrap();
let mut pow3_expected = secret.clone();
pow3_expected.mul(&secret).unwrap();
pow3_expected.mul(&secret).unwrap();
assert_eq!(pow3, pow3_expected);
}
}

View File

@@ -14,281 +14,312 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::ops::{Deref, DerefMut};
use std::cmp::PartialEq;
use std::fmt;
use std::str::FromStr;
use std::hash::{Hash, Hasher};
use secp256k1::{Message as SecpMessage, RecoverableSignature, RecoveryId, Error as SecpError};
use secp256k1::key::{SecretKey, PublicKey};
use rustc_hex::{ToHex, FromHex};
use ethereum_types::{H520, H256};
use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address};
use ethereum_types::{H256, H520};
use public_to_address;
use rustc_hex::{FromHex, ToHex};
use secp256k1::{
key::{PublicKey, SecretKey},
Error as SecpError, Message as SecpMessage, RecoverableSignature, RecoveryId,
};
use std::{
cmp::PartialEq,
fmt,
hash::{Hash, Hasher},
ops::{Deref, DerefMut},
str::FromStr,
};
use Address;
use Error;
use Message;
use Public;
use Secret;
use SECP256K1;
/// Signature encoded as RSV components
#[repr(C)]
pub struct Signature([u8; 65]);
impl Signature {
/// Get a slice into the 'r' portion of the data.
pub fn r(&self) -> &[u8] {
&self.0[0..32]
}
/// Get a slice into the 'r' portion of the data.
pub fn r(&self) -> &[u8] {
&self.0[0..32]
}
/// Get a slice into the 's' portion of the data.
pub fn s(&self) -> &[u8] {
&self.0[32..64]
}
/// Get a slice into the 's' portion of the data.
pub fn s(&self) -> &[u8] {
&self.0[32..64]
}
/// Get the recovery byte.
pub fn v(&self) -> u8 {
self.0[64]
}
/// Get the recovery byte.
pub fn v(&self) -> u8 {
self.0[64]
}
/// Encode the signature into RSV array (V altered to be in "Electrum" notation).
pub fn into_electrum(mut self) -> [u8; 65] {
self.0[64] += 27;
self.0
}
/// Encode the signature into RSV array (V altered to be in "Electrum" notation).
pub fn into_electrum(mut self) -> [u8; 65] {
self.0[64] += 27;
self.0
}
/// Parse bytes as a signature encoded as RSV (V in "Electrum" notation).
/// May return empty (invalid) signature if given data has invalid length.
pub fn from_electrum(data: &[u8]) -> Self {
if data.len() != 65 || data[64] < 27 {
// fallback to empty (invalid) signature
return Signature::default();
}
/// Parse bytes as a signature encoded as RSV (V in "Electrum" notation).
/// May return empty (invalid) signature if given data has invalid length.
pub fn from_electrum(data: &[u8]) -> Self {
if data.len() != 65 || data[64] < 27 {
// fallback to empty (invalid) signature
return Signature::default();
}
let mut sig = [0u8; 65];
sig.copy_from_slice(data);
sig[64] -= 27;
Signature(sig)
}
let mut sig = [0u8; 65];
sig.copy_from_slice(data);
sig[64] -= 27;
Signature(sig)
}
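// Round-trip sketch: a signature with recovery byte v == 0 becomes v == 27 after
// into_electrum(), and from_electrum() on those 65 bytes restores v == 0; any input
// that is not exactly 65 bytes (or whose last byte is below 27) yields
// Signature::default() instead.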
/// Create a signature object from the sig.
pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self {
let mut sig = [0u8; 65];
sig[0..32].copy_from_slice(&r);
sig[32..64].copy_from_slice(&s);
sig[64] = v;
Signature(sig)
}
/// Create a signature object from the sig.
pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self {
let mut sig = [0u8; 65];
sig[0..32].copy_from_slice(&r);
sig[32..64].copy_from_slice(&s);
sig[64] = v;
Signature(sig)
}
/// Check if this is a "low" signature.
pub fn is_low_s(&self) -> bool {
H256::from_slice(self.s()) <= "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0".into()
}
/// Check if this is a "low" signature.
pub fn is_low_s(&self) -> bool {
H256::from_slice(self.s())
<= "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0".into()
}
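// The constant is half of the secp256k1 curve order; requiring a "low" s rules out
// the malleable twin (n - s) of an otherwise valid signature.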
/// Check if each component of the signature is in range.
pub fn is_valid(&self) -> bool {
self.v() <= 1 &&
H256::from_slice(self.r()) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() &&
H256::from_slice(self.r()) >= 1.into() &&
H256::from_slice(self.s()) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() &&
H256::from_slice(self.s()) >= 1.into()
}
/// Check if each component of the signature is in range.
pub fn is_valid(&self) -> bool {
self.v() <= 1
&& H256::from_slice(self.r())
< "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into()
&& H256::from_slice(self.r()) >= 1.into()
&& H256::from_slice(self.s())
< "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into()
&& H256::from_slice(self.s()) >= 1.into()
}
}
// manual implementation because large arrays don't have trait impls by default.
// remove when integer generics exist
impl PartialEq for Signature {
fn eq(&self, other: &Self) -> bool {
&self.0[..] == &other.0[..]
}
fn eq(&self, other: &Self) -> bool {
&self.0[..] == &other.0[..]
}
}
// manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`.
impl Eq for Signature { }
impl Eq for Signature {}
// also manual for the same reason, but the pretty printing might be useful.
impl fmt::Debug for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.debug_struct("Signature")
.field("r", &self.0[0..32].to_hex())
.field("s", &self.0[32..64].to_hex())
.field("v", &self.0[64..65].to_hex())
.finish()
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.debug_struct("Signature")
.field("r", &self.0[0..32].to_hex())
.field("s", &self.0[32..64].to_hex())
.field("v", &self.0[64..65].to_hex())
.finish()
}
}
impl fmt::Display for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self.to_hex())
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self.to_hex())
}
}
impl FromStr for Signature {
type Err = Error;
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.from_hex() {
Ok(ref hex) if hex.len() == 65 => {
let mut data = [0; 65];
data.copy_from_slice(&hex[0..65]);
Ok(Signature(data))
},
_ => Err(Error::InvalidSignature)
}
}
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.from_hex() {
Ok(ref hex) if hex.len() == 65 => {
let mut data = [0; 65];
data.copy_from_slice(&hex[0..65]);
Ok(Signature(data))
}
_ => Err(Error::InvalidSignature),
}
}
}
impl Default for Signature {
fn default() -> Self {
Signature([0; 65])
}
fn default() -> Self {
Signature([0; 65])
}
}
impl Hash for Signature {
fn hash<H: Hasher>(&self, state: &mut H) {
H520::from(self.0).hash(state);
H520::from(self.0).hash(state);
}
}
impl Clone for Signature {
fn clone(&self) -> Self {
Signature(self.0)
Signature(self.0)
}
}
impl From<[u8; 65]> for Signature {
fn from(s: [u8; 65]) -> Self {
Signature(s)
}
fn from(s: [u8; 65]) -> Self {
Signature(s)
}
}
impl Into<[u8; 65]> for Signature {
fn into(self) -> [u8; 65] {
self.0
}
fn into(self) -> [u8; 65] {
self.0
}
}
impl From<Signature> for H520 {
fn from(s: Signature) -> Self {
H520::from(s.0)
}
fn from(s: Signature) -> Self {
H520::from(s.0)
}
}
impl From<H520> for Signature {
fn from(bytes: H520) -> Self {
Signature(bytes.into())
}
fn from(bytes: H520) -> Self {
Signature(bytes.into())
}
}
impl Deref for Signature {
type Target = [u8; 65];
type Target = [u8; 65];
fn deref(&self) -> &Self::Target {
&self.0
}
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Signature {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
pub fn sign(secret: &Secret, message: &Message) -> Result<Signature, Error> {
let context = &SECP256K1;
let sec = SecretKey::from_slice(context, &secret)?;
let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec)?;
let (rec_id, data) = s.serialize_compact(context);
let mut data_arr = [0; 65];
let context = &SECP256K1;
let sec = SecretKey::from_slice(context, &secret)?;
let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec)?;
let (rec_id, data) = s.serialize_compact(context);
let mut data_arr = [0; 65];
// no need to check if s is low, it always is
data_arr[0..64].copy_from_slice(&data[0..64]);
data_arr[64] = rec_id.to_i32() as u8;
Ok(Signature(data_arr))
// no need to check if s is low, it always is
data_arr[0..64].copy_from_slice(&data[0..64]);
data_arr[64] = rec_id.to_i32() as u8;
Ok(Signature(data_arr))
}
pub fn verify_public(public: &Public, signature: &Signature, message: &Message) -> Result<bool, Error> {
let context = &SECP256K1;
let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?;
let sig = rsig.to_standard(context);
pub fn verify_public(
public: &Public,
signature: &Signature,
message: &Message,
) -> Result<bool, Error> {
let context = &SECP256K1;
let rsig = RecoverableSignature::from_compact(
context,
&signature[0..64],
RecoveryId::from_i32(signature[64] as i32)?,
)?;
let sig = rsig.to_standard(context);
let pdata: [u8; 65] = {
let mut temp = [4u8; 65];
temp[1..65].copy_from_slice(&**public);
temp
};
let pdata: [u8; 65] = {
let mut temp = [4u8; 65];
temp[1..65].copy_from_slice(&**public);
temp
};
let publ = PublicKey::from_slice(context, &pdata)?;
match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) {
Ok(_) => Ok(true),
Err(SecpError::IncorrectSignature) => Ok(false),
Err(x) => Err(Error::from(x))
}
let publ = PublicKey::from_slice(context, &pdata)?;
match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) {
Ok(_) => Ok(true),
Err(SecpError::IncorrectSignature) => Ok(false),
Err(x) => Err(Error::from(x)),
}
}
pub fn verify_address(address: &Address, signature: &Signature, message: &Message) -> Result<bool, Error> {
let public = recover(signature, message)?;
let recovered_address = public_to_address(&public);
Ok(address == &recovered_address)
pub fn verify_address(
address: &Address,
signature: &Signature,
message: &Message,
) -> Result<bool, Error> {
let public = recover(signature, message)?;
let recovered_address = public_to_address(&public);
Ok(address == &recovered_address)
}
pub fn recover(signature: &Signature, message: &Message) -> Result<Public, Error> {
let context = &SECP256K1;
let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?;
let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?;
let serialized = pubkey.serialize_vec(context, false);
let context = &SECP256K1;
let rsig = RecoverableSignature::from_compact(
context,
&signature[0..64],
RecoveryId::from_i32(signature[64] as i32)?,
)?;
let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?;
let serialized = pubkey.serialize_vec(context, false);
let mut public = Public::default();
public.copy_from_slice(&serialized[1..65]);
Ok(public)
let mut public = Public::default();
public.copy_from_slice(&serialized[1..65]);
Ok(public)
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use {Generator, Random, Message};
use super::{sign, verify_public, verify_address, recover, Signature};
use super::{recover, sign, verify_address, verify_public, Signature};
use std::str::FromStr;
use Generator;
use Message;
use Random;
#[test]
fn vrs_conversion() {
// given
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
#[test]
fn vrs_conversion() {
// given
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
// when
let vrs = signature.clone().into_electrum();
let from_vrs = Signature::from_electrum(&vrs);
// when
let vrs = signature.clone().into_electrum();
let from_vrs = Signature::from_electrum(&vrs);
// then
assert_eq!(signature, from_vrs);
}
// then
assert_eq!(signature, from_vrs);
}
#[test]
fn signature_to_and_from_str() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
let string = format!("{}", signature);
let deserialized = Signature::from_str(&string).unwrap();
assert_eq!(signature, deserialized);
}
#[test]
fn signature_to_and_from_str() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
let string = format!("{}", signature);
let deserialized = Signature::from_str(&string).unwrap();
assert_eq!(signature, deserialized);
}
#[test]
fn sign_and_recover_public() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
assert_eq!(keypair.public(), &recover(&signature, &message).unwrap());
}
#[test]
fn sign_and_recover_public() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
assert_eq!(keypair.public(), &recover(&signature, &message).unwrap());
}
#[test]
fn sign_and_verify_public() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
assert!(verify_public(keypair.public(), &signature, &message).unwrap());
}
#[test]
fn sign_and_verify_public() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
assert!(verify_public(keypair.public(), &signature, &message).unwrap());
}
#[test]
fn sign_and_verify_address() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
assert!(verify_address(&keypair.address(), &signature, &message).unwrap());
}
#[test]
fn sign_and_verify_address() {
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
assert!(verify_address(&keypair.address(), &signature, &message).unwrap());
}
}
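A minimal round-trip sketch of the signing API reformatted above, stitched together from the unit tests; it assumes `sign`, `recover`, `verify_public` and `verify_address` are re-exported at the ethkey crate root, as the CLI imports elsewhere in this commit suggest.

use ethkey::{recover, sign, verify_address, verify_public, Generator, Message, Random};

fn signature_round_trip() {
    // Generate a throwaway keypair and sign an all-zero message,
    // exactly as the unit tests above do.
    let keypair = Random.generate().unwrap();
    let message = Message::default();
    let signature = sign(keypair.secret(), &message).unwrap();

    // The recoverable signature yields the signer's public key back ...
    assert_eq!(keypair.public(), &recover(&signature, &message).unwrap());

    // ... and verification works against either the public key or the address.
    assert!(verify_public(keypair.public(), &signature, &message).unwrap());
    assert!(verify_address(&keypair.address(), &signature, &message).unwrap());
}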

View File

@ -14,53 +14,53 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::{cmp, thread};
use std::sync::Arc;
use std::collections::VecDeque;
use parking_lot::Mutex;
use std::{cmp, collections::VecDeque, sync::Arc, thread};
use ethstore::{ethkey::Password, PresaleWallet, Error};
use ethstore::{ethkey::Password, Error, PresaleWallet};
use num_cpus;
pub fn run(passwords: VecDeque<Password>, wallet_path: &str) -> Result<(), Error> {
let passwords = Arc::new(Mutex::new(passwords));
let passwords = Arc::new(Mutex::new(passwords));
let mut handles = Vec::new();
let mut handles = Vec::new();
for _ in 0..num_cpus::get() {
let passwords = passwords.clone();
let wallet = PresaleWallet::open(&wallet_path)?;
handles.push(thread::spawn(move || {
look_for_password(passwords, wallet);
}));
}
for _ in 0..num_cpus::get() {
let passwords = passwords.clone();
let wallet = PresaleWallet::open(&wallet_path)?;
handles.push(thread::spawn(move || {
look_for_password(passwords, wallet);
}));
}
for handle in handles {
handle.join().map_err(|err| Error::Custom(format!("Error finishing thread: {:?}", err)))?;
}
for handle in handles {
handle
.join()
.map_err(|err| Error::Custom(format!("Error finishing thread: {:?}", err)))?;
}
Ok(())
Ok(())
}
fn look_for_password(passwords: Arc<Mutex<VecDeque<Password>>>, wallet: PresaleWallet) {
let mut counter = 0;
while !passwords.lock().is_empty() {
let package = {
let mut passwords = passwords.lock();
let len = passwords.len();
passwords.split_off(cmp::min(len, 32))
};
for pass in package {
counter += 1;
match wallet.decrypt(&pass) {
Ok(_) => {
println!("Found password: {}", pass.as_str());
passwords.lock().clear();
return;
},
_ if counter % 100 == 0 => print!("."),
_ => {},
}
}
}
let mut counter = 0;
while !passwords.lock().is_empty() {
let package = {
let mut passwords = passwords.lock();
let len = passwords.len();
passwords.split_off(cmp::min(len, 32))
};
for pass in package {
counter += 1;
match wallet.decrypt(&pass) {
Ok(_) => {
println!("Found password: {}", pass.as_str());
passwords.lock().clear();
return;
}
_ if counter % 100 == 0 => print!("."),
_ => {}
}
}
}
}
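A short sketch of how this brute-force helper is driven, mirroring the find-wallet-pass path of the CLI below; the `crack` module path and the `String`-to-`Password` conversion are taken from that code and are assumed to be in scope.

use std::collections::VecDeque;

use ethstore::ethkey::Password;

// Split a newline-separated candidate list into a queue and let crack::run
// fan it out across one worker thread per CPU core.
fn crack_wallet(candidates: &str, wallet_path: &str) -> Result<(), ethstore::Error> {
    let passwords: VecDeque<Password> = candidates
        .lines()
        .map(|line| line.to_owned().into())
        .collect();
    crack::run(passwords, wallet_path)
}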

View File

@ -28,14 +28,15 @@ extern crate env_logger;
#[macro_use]
extern crate serde_derive;
use std::collections::VecDeque;
use std::io::Read;
use std::{env, process, fs, fmt};
use std::{collections::VecDeque, env, fmt, fs, io::Read, process};
use docopt::Docopt;
use ethstore::accounts_dir::{KeyDirectory, RootDiskDirectory};
use ethstore::ethkey::{Address, Password};
use ethstore::{EthStore, SimpleSecretStore, SecretStore, import_accounts, PresaleWallet, SecretVaultRef, StoreAccountRef};
use ethstore::{
accounts_dir::{KeyDirectory, RootDiskDirectory},
ethkey::{Address, Password},
import_accounts, EthStore, PresaleWallet, SecretStore, SecretVaultRef, SimpleSecretStore,
StoreAccountRef,
};
mod crack;
@ -92,226 +93,271 @@ Commands:
#[derive(Debug, Deserialize)]
struct Args {
cmd_insert: bool,
cmd_change_pwd: bool,
cmd_list: bool,
cmd_import: bool,
cmd_import_wallet: bool,
cmd_find_wallet_pass: bool,
cmd_remove: bool,
cmd_sign: bool,
cmd_public: bool,
cmd_list_vaults: bool,
cmd_create_vault: bool,
cmd_change_vault_pwd: bool,
cmd_move_to_vault: bool,
cmd_move_from_vault: bool,
arg_secret: String,
arg_password: String,
arg_old_pwd: String,
arg_new_pwd: String,
arg_address: String,
arg_message: String,
arg_path: String,
arg_vault: String,
flag_src: String,
flag_dir: String,
flag_vault: String,
flag_vault_pwd: String,
cmd_insert: bool,
cmd_change_pwd: bool,
cmd_list: bool,
cmd_import: bool,
cmd_import_wallet: bool,
cmd_find_wallet_pass: bool,
cmd_remove: bool,
cmd_sign: bool,
cmd_public: bool,
cmd_list_vaults: bool,
cmd_create_vault: bool,
cmd_change_vault_pwd: bool,
cmd_move_to_vault: bool,
cmd_move_from_vault: bool,
arg_secret: String,
arg_password: String,
arg_old_pwd: String,
arg_new_pwd: String,
arg_address: String,
arg_message: String,
arg_path: String,
arg_vault: String,
flag_src: String,
flag_dir: String,
flag_vault: String,
flag_vault_pwd: String,
}
enum Error {
Ethstore(ethstore::Error),
Docopt(docopt::Error),
Ethstore(ethstore::Error),
Docopt(docopt::Error),
}
impl From<ethstore::Error> for Error {
fn from(err: ethstore::Error) -> Self {
Error::Ethstore(err)
}
fn from(err: ethstore::Error) -> Self {
Error::Ethstore(err)
}
}
impl From<docopt::Error> for Error {
fn from(err: docopt::Error) -> Self {
Error::Docopt(err)
}
fn from(err: docopt::Error) -> Self {
Error::Docopt(err)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Ethstore(ref err) => fmt::Display::fmt(err, f),
Error::Docopt(ref err) => fmt::Display::fmt(err, f),
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Ethstore(ref err) => fmt::Display::fmt(err, f),
Error::Docopt(ref err) => fmt::Display::fmt(err, f),
}
}
}
fn main() {
panic_hook::set_abort();
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "warn")
}
env_logger::try_init().expect("Logger initialized only once.");
panic_hook::set_abort();
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "warn")
}
env_logger::try_init().expect("Logger initialized only once.");
match execute(env::args()) {
Ok(result) => println!("{}", result),
Err(Error::Docopt(ref e)) => e.exit(),
Err(err) => {
eprintln!("{}", err);
process::exit(1);
}
}
match execute(env::args()) {
Ok(result) => println!("{}", result),
Err(Error::Docopt(ref e)) => e.exit(),
Err(err) => {
eprintln!("{}", err);
process::exit(1);
}
}
}
fn key_dir(location: &str, password: Option<Password>) -> Result<Box<KeyDirectory>, Error> {
let dir: RootDiskDirectory = match location {
"geth" => RootDiskDirectory::create(dir::geth(false))?,
"geth-test" => RootDiskDirectory::create(dir::geth(true))?,
path if path.starts_with("parity") => {
let chain = path.split('-').nth(1).unwrap_or("ethereum");
let path = dir::parity(chain);
RootDiskDirectory::create(path)?
},
path => RootDiskDirectory::create(path)?,
};
let dir: RootDiskDirectory = match location {
"geth" => RootDiskDirectory::create(dir::geth(false))?,
"geth-test" => RootDiskDirectory::create(dir::geth(true))?,
path if path.starts_with("parity") => {
let chain = path.split('-').nth(1).unwrap_or("ethereum");
let path = dir::parity(chain);
RootDiskDirectory::create(path)?
}
path => RootDiskDirectory::create(path)?,
};
Ok(Box::new(dir.with_password(password)))
Ok(Box::new(dir.with_password(password)))
}
fn open_args_vault(store: &EthStore, args: &Args) -> Result<SecretVaultRef, Error> {
if args.flag_vault.is_empty() {
return Ok(SecretVaultRef::Root);
}
if args.flag_vault.is_empty() {
return Ok(SecretVaultRef::Root);
}
let vault_pwd = load_password(&args.flag_vault_pwd)?;
store.open_vault(&args.flag_vault, &vault_pwd)?;
Ok(SecretVaultRef::Vault(args.flag_vault.clone()))
let vault_pwd = load_password(&args.flag_vault_pwd)?;
store.open_vault(&args.flag_vault, &vault_pwd)?;
Ok(SecretVaultRef::Vault(args.flag_vault.clone()))
}
fn open_args_vault_account(store: &EthStore, address: Address, args: &Args) -> Result<StoreAccountRef, Error> {
match open_args_vault(store, args)? {
SecretVaultRef::Root => Ok(StoreAccountRef::root(address)),
SecretVaultRef::Vault(name) => Ok(StoreAccountRef::vault(&name, address)),
}
fn open_args_vault_account(
store: &EthStore,
address: Address,
args: &Args,
) -> Result<StoreAccountRef, Error> {
match open_args_vault(store, args)? {
SecretVaultRef::Root => Ok(StoreAccountRef::root(address)),
SecretVaultRef::Vault(name) => Ok(StoreAccountRef::vault(&name, address)),
}
}
fn format_accounts(accounts: &[Address]) -> String {
accounts.iter()
.enumerate()
.map(|(i, a)| format!("{:2}: 0x{:x}", i, a))
.collect::<Vec<String>>()
.join("\n")
accounts
.iter()
.enumerate()
.map(|(i, a)| format!("{:2}: 0x{:x}", i, a))
.collect::<Vec<String>>()
.join("\n")
}
fn format_vaults(vaults: &[String]) -> String {
vaults.join("\n")
vaults.join("\n")
}
fn load_password(path: &str) -> Result<Password, Error> {
let mut file = fs::File::open(path).map_err(|e| ethstore::Error::Custom(format!("Error opening password file '{}': {}", path, e)))?;
let mut password = String::new();
file.read_to_string(&mut password).map_err(|e| ethstore::Error::Custom(format!("Error reading password file '{}': {}", path, e)))?;
// drop EOF
let _ = password.pop();
Ok(password.into())
let mut file = fs::File::open(path).map_err(|e| {
ethstore::Error::Custom(format!("Error opening password file '{}': {}", path, e))
})?;
let mut password = String::new();
file.read_to_string(&mut password).map_err(|e| {
ethstore::Error::Custom(format!("Error reading password file '{}': {}", path, e))
})?;
// drop EOF
let _ = password.pop();
Ok(password.into())
}
fn execute<S, I>(command: I) -> Result<String, Error> where I: IntoIterator<Item=S>, S: AsRef<str> {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.argv(command).deserialize())?;
fn execute<S, I>(command: I) -> Result<String, Error>
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
let args: Args = Docopt::new(USAGE).and_then(|d| d.argv(command).deserialize())?;
let store = EthStore::open(key_dir(&args.flag_dir, None)?)?;
let store = EthStore::open(key_dir(&args.flag_dir, None)?)?;
return if args.cmd_insert {
let secret = args.arg_secret.parse().map_err(|_| ethstore::Error::InvalidSecret)?;
let password = load_password(&args.arg_password)?;
let vault_ref = open_args_vault(&store, &args)?;
let account_ref = store.insert_account(vault_ref, secret, &password)?;
Ok(format!("0x{:x}", account_ref.address))
} else if args.cmd_change_pwd {
let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?;
let old_pwd = load_password(&args.arg_old_pwd)?;
let new_pwd = load_password(&args.arg_new_pwd)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let ok = store.change_password(&account_ref, &old_pwd, &new_pwd).is_ok();
Ok(format!("{}", ok))
} else if args.cmd_list {
let vault_ref = open_args_vault(&store, &args)?;
let accounts = store.accounts()?;
let accounts: Vec<_> = accounts
.into_iter()
.filter(|a| &a.vault == &vault_ref)
.map(|a| a.address)
.collect();
Ok(format_accounts(&accounts))
} else if args.cmd_import {
let password = match args.arg_password.as_ref() {
"" => None,
_ => Some(load_password(&args.arg_password)?)
};
let src = key_dir(&args.flag_src, password)?;
let dst = key_dir(&args.flag_dir, None)?;
return if args.cmd_insert {
let secret = args
.arg_secret
.parse()
.map_err(|_| ethstore::Error::InvalidSecret)?;
let password = load_password(&args.arg_password)?;
let vault_ref = open_args_vault(&store, &args)?;
let account_ref = store.insert_account(vault_ref, secret, &password)?;
Ok(format!("0x{:x}", account_ref.address))
} else if args.cmd_change_pwd {
let address = args
.arg_address
.parse()
.map_err(|_| ethstore::Error::InvalidAccount)?;
let old_pwd = load_password(&args.arg_old_pwd)?;
let new_pwd = load_password(&args.arg_new_pwd)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let ok = store
.change_password(&account_ref, &old_pwd, &new_pwd)
.is_ok();
Ok(format!("{}", ok))
} else if args.cmd_list {
let vault_ref = open_args_vault(&store, &args)?;
let accounts = store.accounts()?;
let accounts: Vec<_> = accounts
.into_iter()
.filter(|a| &a.vault == &vault_ref)
.map(|a| a.address)
.collect();
Ok(format_accounts(&accounts))
} else if args.cmd_import {
let password = match args.arg_password.as_ref() {
"" => None,
_ => Some(load_password(&args.arg_password)?),
};
let src = key_dir(&args.flag_src, password)?;
let dst = key_dir(&args.flag_dir, None)?;
let accounts = import_accounts(&*src, &*dst)?;
Ok(format_accounts(&accounts))
} else if args.cmd_import_wallet {
let wallet = PresaleWallet::open(&args.arg_path)?;
let password = load_password(&args.arg_password)?;
let kp = wallet.decrypt(&password)?;
let vault_ref = open_args_vault(&store, &args)?;
let account_ref = store.insert_account(vault_ref, kp.secret().clone(), &password)?;
Ok(format!("0x{:x}", account_ref.address))
} else if args.cmd_find_wallet_pass {
let passwords = load_password(&args.arg_password)?;
let passwords = passwords.as_str().lines().map(|line| str::to_owned(line).into()).collect::<VecDeque<_>>();
crack::run(passwords, &args.arg_path)?;
Ok(format!("Password not found."))
} else if args.cmd_remove {
let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let ok = store.remove_account(&account_ref, &password).is_ok();
Ok(format!("{}", ok))
} else if args.cmd_sign {
let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?;
let message = args.arg_message.parse().map_err(|_| ethstore::Error::InvalidMessage)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let signature = store.sign(&account_ref, &password, &message)?;
Ok(format!("0x{}", signature))
} else if args.cmd_public {
let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let public = store.public(&account_ref, &password)?;
Ok(format!("0x{:x}", public))
} else if args.cmd_list_vaults {
let vaults = store.list_vaults()?;
Ok(format_vaults(&vaults))
} else if args.cmd_create_vault {
let password = load_password(&args.arg_password)?;
store.create_vault(&args.arg_vault, &password)?;
Ok("OK".to_owned())
} else if args.cmd_change_vault_pwd {
let old_pwd = load_password(&args.arg_old_pwd)?;
let new_pwd = load_password(&args.arg_new_pwd)?;
store.open_vault(&args.arg_vault, &old_pwd)?;
store.change_vault_password(&args.arg_vault, &new_pwd)?;
Ok("OK".to_owned())
} else if args.cmd_move_to_vault {
let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
store.open_vault(&args.arg_vault, &password)?;
store.change_account_vault(SecretVaultRef::Vault(args.arg_vault), account_ref)?;
Ok("OK".to_owned())
} else if args.cmd_move_from_vault {
let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
store.open_vault(&args.arg_vault, &password)?;
store.change_account_vault(SecretVaultRef::Root, StoreAccountRef::vault(&args.arg_vault, address))?;
Ok("OK".to_owned())
} else {
Ok(format!("{}", USAGE))
}
let accounts = import_accounts(&*src, &*dst)?;
Ok(format_accounts(&accounts))
} else if args.cmd_import_wallet {
let wallet = PresaleWallet::open(&args.arg_path)?;
let password = load_password(&args.arg_password)?;
let kp = wallet.decrypt(&password)?;
let vault_ref = open_args_vault(&store, &args)?;
let account_ref = store.insert_account(vault_ref, kp.secret().clone(), &password)?;
Ok(format!("0x{:x}", account_ref.address))
} else if args.cmd_find_wallet_pass {
let passwords = load_password(&args.arg_password)?;
let passwords = passwords
.as_str()
.lines()
.map(|line| str::to_owned(line).into())
.collect::<VecDeque<_>>();
crack::run(passwords, &args.arg_path)?;
Ok(format!("Password not found."))
} else if args.cmd_remove {
let address = args
.arg_address
.parse()
.map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let ok = store.remove_account(&account_ref, &password).is_ok();
Ok(format!("{}", ok))
} else if args.cmd_sign {
let address = args
.arg_address
.parse()
.map_err(|_| ethstore::Error::InvalidAccount)?;
let message = args
.arg_message
.parse()
.map_err(|_| ethstore::Error::InvalidMessage)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let signature = store.sign(&account_ref, &password, &message)?;
Ok(format!("0x{}", signature))
} else if args.cmd_public {
let address = args
.arg_address
.parse()
.map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
let public = store.public(&account_ref, &password)?;
Ok(format!("0x{:x}", public))
} else if args.cmd_list_vaults {
let vaults = store.list_vaults()?;
Ok(format_vaults(&vaults))
} else if args.cmd_create_vault {
let password = load_password(&args.arg_password)?;
store.create_vault(&args.arg_vault, &password)?;
Ok("OK".to_owned())
} else if args.cmd_change_vault_pwd {
let old_pwd = load_password(&args.arg_old_pwd)?;
let new_pwd = load_password(&args.arg_new_pwd)?;
store.open_vault(&args.arg_vault, &old_pwd)?;
store.change_vault_password(&args.arg_vault, &new_pwd)?;
Ok("OK".to_owned())
} else if args.cmd_move_to_vault {
let address = args
.arg_address
.parse()
.map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
let account_ref = open_args_vault_account(&store, address, &args)?;
store.open_vault(&args.arg_vault, &password)?;
store.change_account_vault(SecretVaultRef::Vault(args.arg_vault), account_ref)?;
Ok("OK".to_owned())
} else if args.cmd_move_from_vault {
let address = args
.arg_address
.parse()
.map_err(|_| ethstore::Error::InvalidAccount)?;
let password = load_password(&args.arg_password)?;
store.open_vault(&args.arg_vault, &password)?;
store.change_account_vault(
SecretVaultRef::Root,
StoreAccountRef::vault(&args.arg_vault, address),
)?;
Ok("OK".to_owned())
} else {
Ok(format!("{}", USAGE))
};
}

View File

@ -15,68 +15,93 @@
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
extern crate tempdir;
use std::process::Command;
use std::{fs::File, io::Write, process::Command};
use tempdir::TempDir;
use std::fs::File;
use std::io::Write;
fn run(args: &[&str]) -> String {
let output = Command::new("cargo")
.args(&["run", "--"])
.args(args)
.output()
.unwrap();
assert!(output.status.success());
String::from_utf8(output.stdout).unwrap()
let output = Command::new("cargo")
.args(&["run", "--"])
.args(args)
.output()
.unwrap();
assert!(output.status.success());
String::from_utf8(output.stdout).unwrap()
}
#[test]
fn cli_cmd() {
Command::new("cargo")
.arg("build")
.output()
.unwrap();
Command::new("cargo").arg("build").output().unwrap();
let dir = TempDir::new("test-vault").unwrap();
let dir = TempDir::new("test-vault").unwrap();
let mut passwd = File::create(dir.path().join("test-password")).unwrap();
writeln!(passwd, "password").unwrap();
let mut passwd = File::create(dir.path().join("test-password")).unwrap();
writeln!(passwd, "password").unwrap();
let mut passwd2 = File::create(dir.path().join("test-vault-addr")).unwrap();
writeln!(passwd2, "password2").unwrap();
let mut passwd2 = File::create(dir.path().join("test-vault-addr")).unwrap();
writeln!(passwd2, "password2").unwrap();
let test_password_buf = dir.path().join("test-password");
let test_password: &str = test_password_buf.to_str().unwrap();
let dir_str: &str = dir.path().to_str().unwrap();
let test_vault_addr_buf = dir.path().join("test-vault-addr");
let test_vault_addr = test_vault_addr_buf.to_str().unwrap();
let test_password_buf = dir.path().join("test-password");
let test_password: &str = test_password_buf.to_str().unwrap();
let dir_str: &str = dir.path().to_str().unwrap();
let test_vault_addr_buf = dir.path().join("test-vault-addr");
let test_vault_addr = test_vault_addr_buf.to_str().unwrap();
run(&["create-vault", "test-vault", test_password, "--dir", dir_str]);
run(&[
"create-vault",
"test-vault",
test_password,
"--dir",
dir_str,
]);
let output = run(&["insert", "7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5",
test_vault_addr,
"--dir", dir_str,
"--vault", "test-vault",
"--vault-pwd", test_password]);
let address = output.trim();
let output = run(&[
"insert",
"7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5",
test_vault_addr,
"--dir",
dir_str,
"--vault",
"test-vault",
"--vault-pwd",
test_password,
]);
let address = output.trim();
let output = run(&["list",
"--dir", dir_str,
"--vault", "test-vault",
"--vault-pwd", test_password]);
assert_eq!(output, " 0: 0xa8fa5dd30a87bb9e3288d604eb74949c515ab66e\n");
let output = run(&[
"list",
"--dir",
dir_str,
"--vault",
"test-vault",
"--vault-pwd",
test_password,
]);
assert_eq!(output, " 0: 0xa8fa5dd30a87bb9e3288d604eb74949c515ab66e\n");
let output = run(&["sign", &address[2..],
test_vault_addr,
"7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5",
"--dir", dir_str,
"--vault", "test-vault",
"--vault-pwd", test_password]);
assert_eq!(output, "0x54ab6e5cf0c5cb40043fdca5d15d611a3a94285414a076dafecc8dc9c04183f413296a3defff61092c0bb478dc9887ec01070e1275234211208fb8f4be4a9b0101\n");
let output = run(&[
"sign",
&address[2..],
test_vault_addr,
"7d29fab185a33e2cd955812397354c472d2b84615b645aa135ff539f6b0d70d5",
"--dir",
dir_str,
"--vault",
"test-vault",
"--vault-pwd",
test_password,
]);
assert_eq!(output, "0x54ab6e5cf0c5cb40043fdca5d15d611a3a94285414a076dafecc8dc9c04183f413296a3defff61092c0bb478dc9887ec01070e1275234211208fb8f4be4a9b0101\n");
let output = run(&["public", &address[2..], test_vault_addr,
"--dir", dir_str,
"--vault", "test-vault",
"--vault-pwd", test_password]);
assert_eq!(output, "0x35f222d88b80151857a2877826d940104887376a94c1cbd2c8c7c192eb701df88a18a4ecb8b05b1466c5b3706042027b5e079fe3a3683e66d822b0e047aa3418\n");
let output = run(&[
"public",
&address[2..],
test_vault_addr,
"--dir",
dir_str,
"--vault",
"test-vault",
"--vault-pwd",
test_password,
]);
assert_eq!(output, "0x35f222d88b80151857a2877826d940104887376a94c1cbd2c8c7c192eb701df88a18a4ecb8b05b1466c5b3706042027b5e079fe3a3683e66d822b0e047aa3418\n");
}

View File

@ -18,42 +18,40 @@ use json;
#[derive(Debug, PartialEq, Clone)]
pub struct Aes128Ctr {
pub iv: [u8; 16],
pub iv: [u8; 16],
}
#[derive(Debug, PartialEq, Clone)]
pub enum Cipher {
Aes128Ctr(Aes128Ctr),
Aes128Ctr(Aes128Ctr),
}
impl From<json::Aes128Ctr> for Aes128Ctr {
fn from(json: json::Aes128Ctr) -> Self {
Aes128Ctr {
iv: json.iv.into()
}
}
fn from(json: json::Aes128Ctr) -> Self {
Aes128Ctr { iv: json.iv.into() }
}
}
impl Into<json::Aes128Ctr> for Aes128Ctr {
fn into(self) -> json::Aes128Ctr {
json::Aes128Ctr {
iv: From::from(self.iv)
}
}
fn into(self) -> json::Aes128Ctr {
json::Aes128Ctr {
iv: From::from(self.iv),
}
}
}
impl From<json::Cipher> for Cipher {
fn from(json: json::Cipher) -> Self {
match json {
json::Cipher::Aes128Ctr(params) => Cipher::Aes128Ctr(From::from(params)),
}
}
fn from(json: json::Cipher) -> Self {
match json {
json::Cipher::Aes128Ctr(params) => Cipher::Aes128Ctr(From::from(params)),
}
}
}
impl Into<json::Cipher> for Cipher {
fn into(self) -> json::Cipher {
match self {
Cipher::Aes128Ctr(params) => json::Cipher::Aes128Ctr(params.into()),
}
}
fn into(self) -> json::Cipher {
match self {
Cipher::Aes128Ctr(params) => json::Cipher::Aes128Ctr(params.into()),
}
}
}

View File

@ -14,198 +14,221 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::str;
use std::num::NonZeroU32;
use account::{Aes128Ctr, Cipher, Kdf, Pbkdf2, Prf};
use crypto::{self, Keccak256};
use ethkey::{Password, Secret};
use {json, Error, crypto};
use crypto::Keccak256;
use json;
use random::Random;
use smallvec::SmallVec;
use account::{Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf};
use std::{num::NonZeroU32, str};
use Error;
/// Encrypted data
#[derive(Debug, PartialEq, Clone)]
pub struct Crypto {
/// Encryption parameters
pub cipher: Cipher,
/// Encrypted data buffer
pub ciphertext: Vec<u8>,
/// Key derivation function parameters
pub kdf: Kdf,
/// Message authentication code
pub mac: [u8; 32],
/// Encryption parameters
pub cipher: Cipher,
/// Encrypted data buffer
pub ciphertext: Vec<u8>,
/// Key derivation function parameters
pub kdf: Kdf,
/// Message authentication code
pub mac: [u8; 32],
}
impl From<json::Crypto> for Crypto {
fn from(json: json::Crypto) -> Self {
Crypto {
cipher: json.cipher.into(),
ciphertext: json.ciphertext.into(),
kdf: json.kdf.into(),
mac: json.mac.into(),
}
}
fn from(json: json::Crypto) -> Self {
Crypto {
cipher: json.cipher.into(),
ciphertext: json.ciphertext.into(),
kdf: json.kdf.into(),
mac: json.mac.into(),
}
}
}
impl From<Crypto> for json::Crypto {
fn from(c: Crypto) -> Self {
json::Crypto {
cipher: c.cipher.into(),
ciphertext: c.ciphertext.into(),
kdf: c.kdf.into(),
mac: c.mac.into(),
}
}
fn from(c: Crypto) -> Self {
json::Crypto {
cipher: c.cipher.into(),
ciphertext: c.ciphertext.into(),
kdf: c.kdf.into(),
mac: c.mac.into(),
}
}
}
impl str::FromStr for Crypto {
type Err = <json::Crypto as str::FromStr>::Err;
type Err = <json::Crypto as str::FromStr>::Err;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.parse::<json::Crypto>().map(Into::into)
}
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.parse::<json::Crypto>().map(Into::into)
}
}
impl From<Crypto> for String {
fn from(c: Crypto) -> Self {
json::Crypto::from(c).into()
}
fn from(c: Crypto) -> Self {
json::Crypto::from(c).into()
}
}
impl Crypto {
/// Encrypt account secret
pub fn with_secret(secret: &Secret, password: &Password, iterations: NonZeroU32) -> Result<Self, crypto::Error> {
Crypto::with_plain(&*secret, password, iterations)
}
/// Encrypt account secret
pub fn with_secret(
secret: &Secret,
password: &Password,
iterations: NonZeroU32,
) -> Result<Self, crypto::Error> {
Crypto::with_plain(&*secret, password, iterations)
}
/// Encrypt custom plain data
pub fn with_plain(plain: &[u8], password: &Password, iterations: NonZeroU32) -> Result<Self, crypto::Error> {
let salt: [u8; 32] = Random::random();
let iv: [u8; 16] = Random::random();
/// Encrypt custom plain data
pub fn with_plain(
plain: &[u8],
password: &Password,
iterations: NonZeroU32,
) -> Result<Self, crypto::Error> {
let salt: [u8; 32] = Random::random();
let iv: [u8; 16] = Random::random();
// two parts of derived key
// DK = [ DK[0..15] DK[16..31] ] = [derived_left_bits, derived_right_bits]
let (derived_left_bits, derived_right_bits) =
crypto::derive_key_iterations(password.as_bytes(), &salt, iterations);
// two parts of derived key
// DK = [ DK[0..15] DK[16..31] ] = [derived_left_bits, derived_right_bits]
let (derived_left_bits, derived_right_bits) =
crypto::derive_key_iterations(password.as_bytes(), &salt, iterations);
// preallocated (on-stack in case of `Secret`) buffer to hold cipher
// length = length(plain) as we are using CTR-approach
let plain_len = plain.len();
let mut ciphertext: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; plain_len]);
// preallocated (on-stack in case of `Secret`) buffer to hold cipher
// length = length(plain) as we are using CTR-approach
let plain_len = plain.len();
let mut ciphertext: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; plain_len]);
// aes-128-ctr with initial vector of iv
crypto::aes::encrypt_128_ctr(&derived_left_bits, &iv, plain, &mut *ciphertext)?;
// aes-128-ctr with initial vector of iv
crypto::aes::encrypt_128_ctr(&derived_left_bits, &iv, plain, &mut *ciphertext)?;
// KECCAK(DK[16..31] ++ <ciphertext>), where DK[16..31] - derived_right_bits
let mac = crypto::derive_mac(&derived_right_bits, &*ciphertext).keccak256();
// KECCAK(DK[16..31] ++ <ciphertext>), where DK[16..31] - derived_right_bits
let mac = crypto::derive_mac(&derived_right_bits, &*ciphertext).keccak256();
Ok(Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: iv,
}),
ciphertext: ciphertext.into_vec(),
kdf: Kdf::Pbkdf2(Pbkdf2 {
dklen: crypto::KEY_LENGTH as u32,
salt: salt.to_vec(),
c: iterations,
prf: Prf::HmacSha256,
}),
mac: mac,
})
}
Ok(Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr { iv: iv }),
ciphertext: ciphertext.into_vec(),
kdf: Kdf::Pbkdf2(Pbkdf2 {
dklen: crypto::KEY_LENGTH as u32,
salt: salt.to_vec(),
c: iterations,
prf: Prf::HmacSha256,
}),
mac: mac,
})
}
/// Try to decrypt and convert result to account secret
pub fn secret(&self, password: &Password) -> Result<Secret, Error> {
if self.ciphertext.len() > 32 {
return Err(Error::InvalidSecret);
}
/// Try to decrypt and convert result to account secret
pub fn secret(&self, password: &Password) -> Result<Secret, Error> {
if self.ciphertext.len() > 32 {
return Err(Error::InvalidSecret);
}
let secret = self.do_decrypt(password, 32)?;
Ok(Secret::from_unsafe_slice(&secret)?)
}
let secret = self.do_decrypt(password, 32)?;
Ok(Secret::from_unsafe_slice(&secret)?)
}
/// Try to decrypt and return result as is
pub fn decrypt(&self, password: &Password) -> Result<Vec<u8>, Error> {
let expected_len = self.ciphertext.len();
self.do_decrypt(password, expected_len)
}
/// Try to decrypt and return result as is
pub fn decrypt(&self, password: &Password) -> Result<Vec<u8>, Error> {
let expected_len = self.ciphertext.len();
self.do_decrypt(password, expected_len)
}
fn do_decrypt(&self, password: &Password, expected_len: usize) -> Result<Vec<u8>, Error> {
let (derived_left_bits, derived_right_bits) = match self.kdf {
Kdf::Pbkdf2(ref params) => crypto::derive_key_iterations(password.as_bytes(), &params.salt, params.c),
Kdf::Scrypt(ref params) => crypto::scrypt::derive_key(password.as_bytes(), &params.salt, params.n, params.p, params.r)?,
};
fn do_decrypt(&self, password: &Password, expected_len: usize) -> Result<Vec<u8>, Error> {
let (derived_left_bits, derived_right_bits) = match self.kdf {
Kdf::Pbkdf2(ref params) => {
crypto::derive_key_iterations(password.as_bytes(), &params.salt, params.c)
}
Kdf::Scrypt(ref params) => crypto::scrypt::derive_key(
password.as_bytes(),
&params.salt,
params.n,
params.p,
params.r,
)?,
};
let mac = crypto::derive_mac(&derived_right_bits, &self.ciphertext).keccak256();
let mac = crypto::derive_mac(&derived_right_bits, &self.ciphertext).keccak256();
if !crypto::is_equal(&mac, &self.mac) {
return Err(Error::InvalidPassword)
}
if !crypto::is_equal(&mac, &self.mac) {
return Err(Error::InvalidPassword);
}
let mut plain: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; expected_len]);
let mut plain: SmallVec<[u8; 32]> = SmallVec::from_vec(vec![0; expected_len]);
match self.cipher {
Cipher::Aes128Ctr(ref params) => {
// checked by callers
debug_assert!(expected_len >= self.ciphertext.len());
match self.cipher {
Cipher::Aes128Ctr(ref params) => {
// checked by callers
debug_assert!(expected_len >= self.ciphertext.len());
let from = expected_len - self.ciphertext.len();
crypto::aes::decrypt_128_ctr(&derived_left_bits, &params.iv, &self.ciphertext, &mut plain[from..])?;
Ok(plain.into_iter().collect())
},
}
}
let from = expected_len - self.ciphertext.len();
crypto::aes::decrypt_128_ctr(
&derived_left_bits,
&params.iv,
&self.ciphertext,
&mut plain[from..],
)?;
Ok(plain.into_iter().collect())
}
}
}
}
#[cfg(test)]
mod tests {
use ethkey::{Generator, Random};
use super::{Crypto, Error, NonZeroU32};
use super::{Crypto, Error, NonZeroU32};
use ethkey::{Generator, Random};
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed");
}
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed");
}
#[test]
fn crypto_with_secret_create() {
let keypair = Random.generate().unwrap();
let passwd = "this is sparta".into();
let crypto = Crypto::with_secret(keypair.secret(), &passwd, *ITERATIONS).unwrap();
let secret = crypto.secret(&passwd).unwrap();
assert_eq!(keypair.secret(), &secret);
}
#[test]
fn crypto_with_secret_create() {
let keypair = Random.generate().unwrap();
let passwd = "this is sparta".into();
let crypto = Crypto::with_secret(keypair.secret(), &passwd, *ITERATIONS).unwrap();
let secret = crypto.secret(&passwd).unwrap();
assert_eq!(keypair.secret(), &secret);
}
#[test]
fn crypto_with_secret_invalid_password() {
let keypair = Random.generate().unwrap();
let crypto = Crypto::with_secret(keypair.secret(), &"this is sparta".into(), *ITERATIONS).unwrap();
assert_matches!(crypto.secret(&"this is sparta!".into()), Err(Error::InvalidPassword))
}
#[test]
fn crypto_with_secret_invalid_password() {
let keypair = Random.generate().unwrap();
let crypto =
Crypto::with_secret(keypair.secret(), &"this is sparta".into(), *ITERATIONS).unwrap();
assert_matches!(
crypto.secret(&"this is sparta!".into()),
Err(Error::InvalidPassword)
)
}
#[test]
fn crypto_with_null_plain_data() {
let original_data = b"";
let passwd = "this is sparta".into();
let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap();
let decrypted_data = crypto.decrypt(&passwd).unwrap();
assert_eq!(original_data[..], *decrypted_data);
}
#[test]
fn crypto_with_null_plain_data() {
let original_data = b"";
let passwd = "this is sparta".into();
let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap();
let decrypted_data = crypto.decrypt(&passwd).unwrap();
assert_eq!(original_data[..], *decrypted_data);
}
#[test]
fn crypto_with_tiny_plain_data() {
let original_data = b"{}";
let passwd = "this is sparta".into();
let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap();
let decrypted_data = crypto.decrypt(&passwd).unwrap();
assert_eq!(original_data[..], *decrypted_data);
}
#[test]
fn crypto_with_tiny_plain_data() {
let original_data = b"{}";
let passwd = "this is sparta".into();
let crypto = Crypto::with_plain(&original_data[..], &passwd, *ITERATIONS).unwrap();
let decrypted_data = crypto.decrypt(&passwd).unwrap();
assert_eq!(original_data[..], *decrypted_data);
}
#[test]
fn crypto_with_huge_plain_data() {
let original_data: Vec<_> = (1..65536).map(|i| (i % 256) as u8).collect();
let passwd = "this is sparta".into();
let crypto = Crypto::with_plain(&original_data, &passwd, *ITERATIONS).unwrap();
let decrypted_data = crypto.decrypt(&passwd).unwrap();
assert_eq!(&original_data, &decrypted_data);
}
#[test]
fn crypto_with_huge_plain_data() {
let original_data: Vec<_> = (1..65536).map(|i| (i % 256) as u8).collect();
let passwd = "this is sparta".into();
let crypto = Crypto::with_plain(&original_data, &passwd, *ITERATIONS).unwrap();
let decrypted_data = crypto.decrypt(&passwd).unwrap();
assert_eq!(&original_data, &decrypted_data);
}
}
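One path the tests above do not exercise is the string form: `From<Crypto> for String` serialises the blob to keystore JSON and `FromStr` parses it back. A small sketch, assuming the JSON conversion round-trips losslessly, which is what those impls exist for.

use std::num::NonZeroU32;

// Encrypt a small payload, serialise the crypto blob to its JSON string form,
// parse it back and check that decryption still yields the original bytes.
fn crypto_string_round_trip() {
    let iterations = NonZeroU32::new(10240).expect("10240 > 0; qed");
    let passwd = "this is sparta".into();
    let crypto = Crypto::with_plain(b"hello", &passwd, iterations).unwrap();

    let json: String = crypto.clone().into();
    let parsed: Crypto = json.parse().unwrap();

    assert_eq!(parsed, crypto);
    assert_eq!(parsed.decrypt(&passwd).unwrap(), b"hello".to_vec());
}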

View File

@ -19,108 +19,108 @@ use std::num::NonZeroU32;
#[derive(Debug, PartialEq, Clone)]
pub enum Prf {
HmacSha256,
HmacSha256,
}
#[derive(Debug, PartialEq, Clone)]
pub struct Pbkdf2 {
pub c: NonZeroU32,
pub dklen: u32,
pub prf: Prf,
pub salt: Vec<u8>,
pub c: NonZeroU32,
pub dklen: u32,
pub prf: Prf,
pub salt: Vec<u8>,
}
#[derive(Debug, PartialEq, Clone)]
pub struct Scrypt {
pub dklen: u32,
pub p: u32,
pub n: u32,
pub r: u32,
pub salt: Vec<u8>,
pub dklen: u32,
pub p: u32,
pub n: u32,
pub r: u32,
pub salt: Vec<u8>,
}
#[derive(Debug, PartialEq, Clone)]
pub enum Kdf {
Pbkdf2(Pbkdf2),
Scrypt(Scrypt),
Pbkdf2(Pbkdf2),
Scrypt(Scrypt),
}
impl From<json::Prf> for Prf {
fn from(json: json::Prf) -> Self {
match json {
json::Prf::HmacSha256 => Prf::HmacSha256,
}
}
fn from(json: json::Prf) -> Self {
match json {
json::Prf::HmacSha256 => Prf::HmacSha256,
}
}
}
impl Into<json::Prf> for Prf {
fn into(self) -> json::Prf {
match self {
Prf::HmacSha256 => json::Prf::HmacSha256,
}
}
fn into(self) -> json::Prf {
match self {
Prf::HmacSha256 => json::Prf::HmacSha256,
}
}
}
impl From<json::Pbkdf2> for Pbkdf2 {
fn from(json: json::Pbkdf2) -> Self {
Pbkdf2 {
c: json.c,
dklen: json.dklen,
prf: From::from(json.prf),
salt: json.salt.into(),
}
}
fn from(json: json::Pbkdf2) -> Self {
Pbkdf2 {
c: json.c,
dklen: json.dklen,
prf: From::from(json.prf),
salt: json.salt.into(),
}
}
}
impl Into<json::Pbkdf2> for Pbkdf2 {
fn into(self) -> json::Pbkdf2 {
json::Pbkdf2 {
c: self.c,
dklen: self.dklen,
prf: self.prf.into(),
salt: From::from(self.salt),
}
}
fn into(self) -> json::Pbkdf2 {
json::Pbkdf2 {
c: self.c,
dklen: self.dklen,
prf: self.prf.into(),
salt: From::from(self.salt),
}
}
}
impl From<json::Scrypt> for Scrypt {
fn from(json: json::Scrypt) -> Self {
Scrypt {
dklen: json.dklen,
p: json.p,
n: json.n,
r: json.r,
salt: json.salt.into(),
}
}
fn from(json: json::Scrypt) -> Self {
Scrypt {
dklen: json.dklen,
p: json.p,
n: json.n,
r: json.r,
salt: json.salt.into(),
}
}
}
impl Into<json::Scrypt> for Scrypt {
fn into(self) -> json::Scrypt {
json::Scrypt {
dklen: self.dklen,
p: self.p,
n: self.n,
r: self.r,
salt: From::from(self.salt),
}
}
fn into(self) -> json::Scrypt {
json::Scrypt {
dklen: self.dklen,
p: self.p,
n: self.n,
r: self.r,
salt: From::from(self.salt),
}
}
}
impl From<json::Kdf> for Kdf {
fn from(json: json::Kdf) -> Self {
match json {
json::Kdf::Pbkdf2(params) => Kdf::Pbkdf2(From::from(params)),
json::Kdf::Scrypt(params) => Kdf::Scrypt(From::from(params)),
}
}
fn from(json: json::Kdf) -> Self {
match json {
json::Kdf::Pbkdf2(params) => Kdf::Pbkdf2(From::from(params)),
json::Kdf::Scrypt(params) => Kdf::Scrypt(From::from(params)),
}
}
}
impl Into<json::Kdf> for Kdf {
fn into(self) -> json::Kdf {
match self {
Kdf::Pbkdf2(params) => json::Kdf::Pbkdf2(params.into()),
Kdf::Scrypt(params) => json::Kdf::Scrypt(params.into()),
}
}
fn into(self) -> json::Kdf {
match self {
Kdf::Pbkdf2(params) => json::Kdf::Pbkdf2(params.into()),
Kdf::Scrypt(params) => json::Kdf::Scrypt(params.into()),
}
}
}
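These parameter structs are what `Crypto::with_plain` above records alongside the ciphertext; a sketch of building the PBKDF2 variant by hand, where the 32-byte `dklen` stands in for the crate's `crypto::KEY_LENGTH` constant (an assumption, not shown in this hunk).

use std::num::NonZeroU32;

// Describe a PBKDF2-HMAC-SHA256 derivation with a caller-chosen iteration count,
// matching the shape that Crypto::with_plain produces.
fn pbkdf2_kdf(salt: Vec<u8>, iterations: NonZeroU32) -> Kdf {
    Kdf::Pbkdf2(Pbkdf2 {
        c: iterations,
        dklen: 32, // crypto::KEY_LENGTH in the crate
        prf: Prf::HmacSha256,
        salt,
    })
}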

View File

@ -20,8 +20,10 @@ mod kdf;
mod safe_account;
mod version;
pub use self::cipher::{Cipher, Aes128Ctr};
pub use self::crypto::Crypto;
pub use self::kdf::{Kdf, Pbkdf2, Scrypt, Prf};
pub use self::safe_account::SafeAccount;
pub use self::version::Version;
pub use self::{
cipher::{Aes128Ctr, Cipher},
crypto::Crypto,
kdf::{Kdf, Pbkdf2, Prf, Scrypt},
safe_account::SafeAccount,
version::Version,
};

View File

@ -14,221 +14,273 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use ethkey::{self, KeyPair, sign, Address, Password, Signature, Message, Public, Secret};
use ethkey::crypto::ecdh::agree;
use {json, Error};
use super::crypto::Crypto;
use account::Version;
use crypto;
use super::crypto::Crypto;
use ethkey::{
self, crypto::ecdh::agree, sign, Address, KeyPair, Message, Password, Public, Secret, Signature,
};
use json;
use std::num::NonZeroU32;
use Error;
/// Account representation.
#[derive(Debug, PartialEq, Clone)]
pub struct SafeAccount {
/// Account ID
pub id: [u8; 16],
/// Account version
pub version: Version,
/// Account address
pub address: Address,
/// Account private key derivation definition.
pub crypto: Crypto,
/// Account filename
pub filename: Option<String>,
/// Account name
pub name: String,
/// Account metadata
pub meta: String,
/// Account ID
pub id: [u8; 16],
/// Account version
pub version: Version,
/// Account address
pub address: Address,
/// Account private key derivation definition.
pub crypto: Crypto,
/// Account filename
pub filename: Option<String>,
/// Account name
pub name: String,
/// Account metadata
pub meta: String,
}
impl Into<json::KeyFile> for SafeAccount {
fn into(self) -> json::KeyFile {
json::KeyFile {
id: From::from(self.id),
version: self.version.into(),
address: Some(self.address.into()),
crypto: self.crypto.into(),
name: Some(self.name.into()),
meta: Some(self.meta.into()),
}
}
fn into(self) -> json::KeyFile {
json::KeyFile {
id: From::from(self.id),
version: self.version.into(),
address: Some(self.address.into()),
crypto: self.crypto.into(),
name: Some(self.name.into()),
meta: Some(self.meta.into()),
}
}
}
impl SafeAccount {
/// Create a new account
pub fn create(
keypair: &KeyPair,
id: [u8; 16],
password: &Password,
iterations: NonZeroU32,
name: String,
meta: String
) -> Result<Self, crypto::Error> {
Ok(SafeAccount {
id: id,
version: Version::V3,
crypto: Crypto::with_secret(keypair.secret(), password, iterations)?,
address: keypair.address(),
filename: None,
name: name,
meta: meta,
})
}
/// Create a new account
pub fn create(
keypair: &KeyPair,
id: [u8; 16],
password: &Password,
iterations: NonZeroU32,
name: String,
meta: String,
) -> Result<Self, crypto::Error> {
Ok(SafeAccount {
id: id,
version: Version::V3,
crypto: Crypto::with_secret(keypair.secret(), password, iterations)?,
address: keypair.address(),
filename: None,
name: name,
meta: meta,
})
}
/// Create a new `SafeAccount` from the given `json`; if it was read from a
/// file, the `filename` should be `Some` name. If it is as yet anonymous, then it
/// can be left `None`.
/// In case `password` is provided, we will attempt to read the secret from the keyfile
/// and derive the address from it instead of reading it directly.
/// Providing a password is required for `json::KeyFile`s with no address.
pub fn from_file(json: json::KeyFile, filename: Option<String>, password: &Option<Password>) -> Result<Self, Error> {
let crypto = Crypto::from(json.crypto);
let address = match (password, &json.address) {
(None, Some(json_address)) => json_address.into(),
(None, None) => Err(Error::Custom(
"This keystore does not contain address. You need to provide password to import it".into()))?,
(Some(password), json_address) => {
let derived_address = KeyPair::from_secret(
crypto.secret(&password).map_err(|_| Error::InvalidPassword)?
)?.address();
/// Create a new `SafeAccount` from the given `json`; if it was read from a
/// file, the `filename` should be `Some` name. If it is as yet anonymous, then it
/// can be left `None`.
/// In case `password` is provided, we will attempt to read the secret from the keyfile
/// and derive the address from it instead of reading it directly.
/// Providing a password is required for `json::KeyFile`s with no address.
pub fn from_file(
json: json::KeyFile,
filename: Option<String>,
password: &Option<Password>,
) -> Result<Self, Error> {
let crypto = Crypto::from(json.crypto);
let address = match (password, &json.address) {
(None, Some(json_address)) => json_address.into(),
(None, None) => Err(Error::Custom(
"This keystore does not contain address. You need to provide password to import it"
.into(),
))?,
(Some(password), json_address) => {
let derived_address = KeyPair::from_secret(
crypto
.secret(&password)
.map_err(|_| Error::InvalidPassword)?,
)?
.address();
match json_address {
Some(json_address) => {
let json_address = json_address.into();
if derived_address != json_address {
warn!("Detected address mismatch when opening an account. Derived: {:?}, in json got: {:?}",
match json_address {
Some(json_address) => {
let json_address = json_address.into();
if derived_address != json_address {
warn!("Detected address mismatch when opening an account. Derived: {:?}, in json got: {:?}",
derived_address, json_address);
}
},
_ => {},
}
derived_address
}
};
}
}
_ => {}
}
derived_address
}
};
Ok(SafeAccount {
id: json.id.into(),
version: json.version.into(),
address,
crypto,
filename,
name: json.name.unwrap_or(String::new()),
meta: json.meta.unwrap_or("{}".to_owned()),
})
}
Ok(SafeAccount {
id: json.id.into(),
version: json.version.into(),
address,
crypto,
filename,
name: json.name.unwrap_or(String::new()),
meta: json.meta.unwrap_or("{}".to_owned()),
})
}
/// Create a new `SafeAccount` from the given vault `json`; if it was read from a
/// file, the `filename` should be `Some` name. If it is as yet anonymous, then it
/// can be left `None`.
pub fn from_vault_file(password: &Password, json: json::VaultKeyFile, filename: Option<String>) -> Result<Self, Error> {
let meta_crypto: Crypto = json.metacrypto.into();
let meta_plain = meta_crypto.decrypt(password)?;
let meta_plain = json::VaultKeyMeta::load(&meta_plain).map_err(|e| Error::Custom(format!("{:?}", e)))?;
/// Create a new `SafeAccount` from the given vault `json`; if it was read from a
/// file, the `filename` should be `Some` name. If it is as yet anonymous, then it
/// can be left `None`.
pub fn from_vault_file(
password: &Password,
json: json::VaultKeyFile,
filename: Option<String>,
) -> Result<Self, Error> {
let meta_crypto: Crypto = json.metacrypto.into();
let meta_plain = meta_crypto.decrypt(password)?;
let meta_plain =
json::VaultKeyMeta::load(&meta_plain).map_err(|e| Error::Custom(format!("{:?}", e)))?;
SafeAccount::from_file(json::KeyFile {
id: json.id,
version: json.version,
crypto: json.crypto,
address: Some(meta_plain.address),
name: meta_plain.name,
meta: meta_plain.meta,
}, filename, &None)
}
SafeAccount::from_file(
json::KeyFile {
id: json.id,
version: json.version,
crypto: json.crypto,
address: Some(meta_plain.address),
name: meta_plain.name,
meta: meta_plain.meta,
},
filename,
&None,
)
}
/// Create a new `VaultKeyFile` from the given `self`
pub fn into_vault_file(self, iterations: NonZeroU32, password: &Password) -> Result<json::VaultKeyFile, Error> {
let meta_plain = json::VaultKeyMeta {
address: self.address.into(),
name: Some(self.name),
meta: Some(self.meta),
};
let meta_plain = meta_plain.write().map_err(|e| Error::Custom(format!("{:?}", e)))?;
let meta_crypto = Crypto::with_plain(&meta_plain, password, iterations)?;
/// Create a new `VaultKeyFile` from the given `self`
pub fn into_vault_file(
self,
iterations: NonZeroU32,
password: &Password,
) -> Result<json::VaultKeyFile, Error> {
let meta_plain = json::VaultKeyMeta {
address: self.address.into(),
name: Some(self.name),
meta: Some(self.meta),
};
let meta_plain = meta_plain
.write()
.map_err(|e| Error::Custom(format!("{:?}", e)))?;
let meta_crypto = Crypto::with_plain(&meta_plain, password, iterations)?;
Ok(json::VaultKeyFile {
id: self.id.into(),
version: self.version.into(),
crypto: self.crypto.into(),
metacrypto: meta_crypto.into(),
})
}
Ok(json::VaultKeyFile {
id: self.id.into(),
version: self.version.into(),
crypto: self.crypto.into(),
metacrypto: meta_crypto.into(),
})
}
/// Sign a message.
pub fn sign(&self, password: &Password, message: &Message) -> Result<Signature, Error> {
let secret = self.crypto.secret(password)?;
sign(&secret, message).map_err(From::from)
}
/// Sign a message.
pub fn sign(&self, password: &Password, message: &Message) -> Result<Signature, Error> {
let secret = self.crypto.secret(password)?;
sign(&secret, message).map_err(From::from)
}
/// Decrypt a message.
pub fn decrypt(&self, password: &Password, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let secret = self.crypto.secret(password)?;
ethkey::crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from)
}
/// Decrypt a message.
pub fn decrypt(
&self,
password: &Password,
shared_mac: &[u8],
message: &[u8],
) -> Result<Vec<u8>, Error> {
let secret = self.crypto.secret(password)?;
ethkey::crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from)
}
/// Agree on shared key.
pub fn agree(&self, password: &Password, other: &Public) -> Result<Secret, Error> {
let secret = self.crypto.secret(password)?;
agree(&secret, other).map_err(From::from)
}
/// Agree on shared key.
pub fn agree(&self, password: &Password, other: &Public) -> Result<Secret, Error> {
let secret = self.crypto.secret(password)?;
agree(&secret, other).map_err(From::from)
}
/// Derive public key.
pub fn public(&self, password: &Password) -> Result<Public, Error> {
let secret = self.crypto.secret(password)?;
Ok(KeyPair::from_secret(secret)?.public().clone())
}
/// Derive public key.
pub fn public(&self, password: &Password) -> Result<Public, Error> {
let secret = self.crypto.secret(password)?;
Ok(KeyPair::from_secret(secret)?.public().clone())
}
/// Change account's password.
pub fn change_password(&self, old_password: &Password, new_password: &Password, iterations: NonZeroU32) -> Result<Self, Error> {
let secret = self.crypto.secret(old_password)?;
let result = SafeAccount {
id: self.id.clone(),
version: self.version.clone(),
crypto: Crypto::with_secret(&secret, new_password, iterations)?,
address: self.address.clone(),
filename: self.filename.clone(),
name: self.name.clone(),
meta: self.meta.clone(),
};
Ok(result)
}
/// Change account's password.
pub fn change_password(
&self,
old_password: &Password,
new_password: &Password,
iterations: NonZeroU32,
) -> Result<Self, Error> {
let secret = self.crypto.secret(old_password)?;
let result = SafeAccount {
id: self.id.clone(),
version: self.version.clone(),
crypto: Crypto::with_secret(&secret, new_password, iterations)?,
address: self.address.clone(),
filename: self.filename.clone(),
name: self.name.clone(),
meta: self.meta.clone(),
};
Ok(result)
}
/// Check if password matches the account.
pub fn check_password(&self, password: &Password) -> bool {
self.crypto.secret(password).is_ok()
}
/// Check if password matches the account.
pub fn check_password(&self, password: &Password) -> bool {
self.crypto.secret(password).is_ok()
}
}
#[cfg(test)]
mod tests {
use ethkey::{Generator, Random, verify_public, Message};
use super::{SafeAccount, NonZeroU32};
use super::{NonZeroU32, SafeAccount};
use ethkey::{verify_public, Generator, Message, Random};
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed");
}
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed");
}
#[test]
fn sign_and_verify_public() {
let keypair = Random.generate().unwrap();
let password = "hello world".into();
let message = Message::default();
let account = SafeAccount::create(
&keypair,
[0u8; 16],
&password,
*ITERATIONS,
"Test".to_owned(),
"{}".to_owned(),
);
let signature = account.unwrap().sign(&password, &message).unwrap();
assert!(verify_public(keypair.public(), &signature, &message).unwrap());
}
#[test]
fn sign_and_verify_public() {
let keypair = Random.generate().unwrap();
let password = "hello world".into();
let message = Message::default();
let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned());
let signature = account.unwrap().sign(&password, &message).unwrap();
assert!(verify_public(keypair.public(), &signature, &message).unwrap());
}
#[test]
fn change_password() {
let keypair = Random.generate().unwrap();
let first_password = "hello world".into();
let sec_password = "this is sparta".into();
let message = Message::default();
let account = SafeAccount::create(&keypair, [0u8; 16], &first_password, *ITERATIONS, "Test".to_owned(), "{}".to_owned()).unwrap();
let new_account = account.change_password(&first_password, &sec_password, *ITERATIONS).unwrap();
assert!(account.sign(&first_password, &message).is_ok());
assert!(account.sign(&sec_password, &message).is_err());
assert!(new_account.sign(&first_password, &message).is_err());
assert!(new_account.sign(&sec_password, &message).is_ok());
}
#[test]
fn change_password() {
let keypair = Random.generate().unwrap();
let first_password = "hello world".into();
let sec_password = "this is sparta".into();
let message = Message::default();
let account = SafeAccount::create(
&keypair,
[0u8; 16],
&first_password,
*ITERATIONS,
"Test".to_owned(),
"{}".to_owned(),
)
.unwrap();
let new_account = account
.change_password(&first_password, &sec_password, *ITERATIONS)
.unwrap();
assert!(account.sign(&first_password, &message).is_ok());
assert!(account.sign(&sec_password, &message).is_err());
assert!(new_account.sign(&first_password, &message).is_err());
assert!(new_account.sign(&sec_password, &message).is_ok());
}
}

View File

@ -18,21 +18,21 @@ use json;
#[derive(Debug, PartialEq, Clone)]
pub enum Version {
V3,
V3,
}
impl From<json::Version> for Version {
fn from(json: json::Version) -> Self {
match json {
json::Version::V3 => Version::V3,
}
}
fn from(json: json::Version) -> Self {
match json {
json::Version::V3 => Version::V3,
}
}
}
impl Into<json::Version> for Version {
fn into(self) -> json::Version {
match self {
Version::V3 => json::Version::V3,
}
}
fn into(self) -> json::Version {
match self {
Version::V3 => json::Version::V3,
}
}
}

View File

@ -14,90 +14,101 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::{fs, io};
use std::io::Write;
use std::path::{PathBuf, Path};
use std::collections::HashMap;
use time;
use {json, SafeAccount, Error};
use json::Uuid;
use super::{KeyDirectory, VaultKeyDirectory, VaultKeyDirectoryProvider, VaultKey};
use super::vault::{VAULT_FILE_NAME, VaultDiskDirectory};
use super::{
vault::{VaultDiskDirectory, VAULT_FILE_NAME},
KeyDirectory, VaultKey, VaultKeyDirectory, VaultKeyDirectoryProvider,
};
use ethkey::Password;
use json::{self, Uuid};
use std::{
collections::HashMap,
fs, io,
io::Write,
path::{Path, PathBuf},
};
use time;
use Error;
use SafeAccount;
const IGNORED_FILES: &'static [&'static str] = &[
"thumbs.db",
"address_book.json",
"dapps_policy.json",
"dapps_accounts.json",
"dapps_history.json",
"vault.json",
"thumbs.db",
"address_book.json",
"dapps_policy.json",
"dapps_accounts.json",
"dapps_history.json",
"vault.json",
];
/// Find a unique filename that does not exist using four-letter random suffix.
pub fn find_unique_filename_using_random_suffix(parent_path: &Path, original_filename: &str) -> io::Result<String> {
let mut path = parent_path.join(original_filename);
let mut deduped_filename = original_filename.to_string();
pub fn find_unique_filename_using_random_suffix(
parent_path: &Path,
original_filename: &str,
) -> io::Result<String> {
let mut path = parent_path.join(original_filename);
let mut deduped_filename = original_filename.to_string();
if path.exists() {
const MAX_RETRIES: usize = 500;
let mut retries = 0;
if path.exists() {
const MAX_RETRIES: usize = 500;
let mut retries = 0;
while path.exists() {
if retries >= MAX_RETRIES {
return Err(io::Error::new(io::ErrorKind::Other, "Exceeded maximum retries when deduplicating filename."));
}
while path.exists() {
if retries >= MAX_RETRIES {
return Err(io::Error::new(
io::ErrorKind::Other,
"Exceeded maximum retries when deduplicating filename.",
));
}
let suffix = ::random::random_string(4);
deduped_filename = format!("{}-{}", original_filename, suffix);
path.set_file_name(&deduped_filename);
retries += 1;
}
}
let suffix = ::random::random_string(4);
deduped_filename = format!("{}-{}", original_filename, suffix);
path.set_file_name(&deduped_filename);
retries += 1;
}
}
Ok(deduped_filename)
Ok(deduped_filename)
}
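On a name collision the helper above keeps drawing a fresh four-letter suffix until the path is free (or 500 retries are exhausted); a small hypothetical illustration, assuming "key.json" already exists under `dir`:
// Sketch: the deduplicated name is "<original>-<4 random chars>", e.g. "key.json-ab3f".
let deduped = find_unique_filename_using_random_suffix(&dir, "key.json")?;
assert_ne!(deduped, "key.json");
assert_eq!(deduped.len(), "key.json".len() + 5); // '-' plus four random characters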
/// Create a new file and restrict permissions to owner only. It errors if the file already exists.
#[cfg(unix)]
pub fn create_new_file_with_permissions_to_owner(file_path: &Path) -> io::Result<fs::File> {
use libc;
use std::os::unix::fs::OpenOptionsExt;
use libc;
use std::os::unix::fs::OpenOptionsExt;
fs::OpenOptions::new()
.write(true)
.create_new(true)
.mode((libc::S_IWUSR | libc::S_IRUSR) as u32)
.open(file_path)
fs::OpenOptions::new()
.write(true)
.create_new(true)
.mode((libc::S_IWUSR | libc::S_IRUSR) as u32)
.open(file_path)
}
/// Create a new file and restrict permissions to owner only. It errors if the file already exists.
#[cfg(not(unix))]
pub fn create_new_file_with_permissions_to_owner(file_path: &Path) -> io::Result<fs::File> {
fs::OpenOptions::new()
.write(true)
.create_new(true)
.open(file_path)
fs::OpenOptions::new()
.write(true)
.create_new(true)
.open(file_path)
}
/// Create a new file and restrict permissions to owner only. It replaces the existing file if it already exists.
#[cfg(unix)]
pub fn replace_file_with_permissions_to_owner(file_path: &Path) -> io::Result<fs::File> {
use libc;
use std::os::unix::fs::PermissionsExt;
use libc;
use std::os::unix::fs::PermissionsExt;
let file = fs::File::create(file_path)?;
let mut permissions = file.metadata()?.permissions();
permissions.set_mode((libc::S_IWUSR | libc::S_IRUSR) as u32);
file.set_permissions(permissions)?;
let file = fs::File::create(file_path)?;
let mut permissions = file.metadata()?.permissions();
permissions.set_mode((libc::S_IWUSR | libc::S_IRUSR) as u32);
file.set_permissions(permissions)?;
Ok(file)
Ok(file)
}
/// Create a new file and restrict permissions to owner only. It replaces the existing file if it already exists.
#[cfg(not(unix))]
pub fn replace_file_with_permissions_to_owner(file_path: &Path) -> io::Result<fs::File> {
fs::File::create(file_path)
fs::File::create(file_path)
}
/// Root keys directory implementation
@ -105,388 +116,495 @@ pub type RootDiskDirectory = DiskDirectory<DiskKeyFileManager>;
/// Disk directory key file manager
pub trait KeyFileManager: Send + Sync {
/// Read `SafeAccount` from given key file stream
fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error> where T: io::Read;
/// Read `SafeAccount` from given key file stream
fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error>
where
T: io::Read;
/// Write `SafeAccount` to given key file stream
fn write<T>(&self, account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write;
/// Write `SafeAccount` to given key file stream
fn write<T>(&self, account: SafeAccount, writer: &mut T) -> Result<(), Error>
where
T: io::Write;
}
/// Disk-based keys directory implementation
pub struct DiskDirectory<T> where T: KeyFileManager {
path: PathBuf,
key_manager: T,
pub struct DiskDirectory<T>
where
T: KeyFileManager,
{
path: PathBuf,
key_manager: T,
}
/// Keys file manager for root keys directory
#[derive(Default)]
pub struct DiskKeyFileManager {
password: Option<Password>,
password: Option<Password>,
}
impl RootDiskDirectory {
pub fn create<P>(path: P) -> Result<Self, Error> where P: AsRef<Path> {
fs::create_dir_all(&path)?;
Ok(Self::at(path))
}
pub fn create<P>(path: P) -> Result<Self, Error>
where
P: AsRef<Path>,
{
fs::create_dir_all(&path)?;
Ok(Self::at(path))
}
/// Allows reading keyfiles with a given password (needed for keyfiles w/o address)
pub fn with_password(&self, password: Option<Password>) -> Self {
DiskDirectory::new(&self.path, DiskKeyFileManager { password })
}
/// Allows reading keyfiles with a given password (needed for keyfiles w/o address)
pub fn with_password(&self, password: Option<Password>) -> Self {
DiskDirectory::new(&self.path, DiskKeyFileManager { password })
}
pub fn at<P>(path: P) -> Self where P: AsRef<Path> {
DiskDirectory::new(path, DiskKeyFileManager::default())
}
pub fn at<P>(path: P) -> Self
where
P: AsRef<Path>,
{
DiskDirectory::new(path, DiskKeyFileManager::default())
}
}
impl<T> DiskDirectory<T> where T: KeyFileManager {
/// Create new disk directory instance
pub fn new<P>(path: P, key_manager: T) -> Self where P: AsRef<Path> {
DiskDirectory {
path: path.as_ref().to_path_buf(),
key_manager: key_manager,
}
}
impl<T> DiskDirectory<T>
where
T: KeyFileManager,
{
/// Create new disk directory instance
pub fn new<P>(path: P, key_manager: T) -> Self
where
P: AsRef<Path>,
{
DiskDirectory {
path: path.as_ref().to_path_buf(),
key_manager: key_manager,
}
}
fn files(&self) -> Result<Vec<PathBuf>, Error> {
Ok(fs::read_dir(&self.path)?
.flat_map(Result::ok)
.filter(|entry| {
let metadata = entry.metadata().ok();
let file_name = entry.file_name();
let name = file_name.to_string_lossy();
// filter directories
metadata.map_or(false, |m| !m.is_dir()) &&
fn files(&self) -> Result<Vec<PathBuf>, Error> {
Ok(fs::read_dir(&self.path)?
.flat_map(Result::ok)
.filter(|entry| {
let metadata = entry.metadata().ok();
let file_name = entry.file_name();
let name = file_name.to_string_lossy();
// filter directories
metadata.map_or(false, |m| !m.is_dir()) &&
// hidden files
!name.starts_with(".") &&
// other ignored files
!IGNORED_FILES.contains(&&*name)
})
.map(|entry| entry.path())
.collect::<Vec<PathBuf>>()
)
}
})
.map(|entry| entry.path())
.collect::<Vec<PathBuf>>())
}
pub fn files_hash(&self) -> Result<u64, Error> {
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
pub fn files_hash(&self) -> Result<u64, Error> {
use std::{collections::hash_map::DefaultHasher, hash::Hasher};
let mut hasher = DefaultHasher::new();
let files = self.files()?;
for file in files {
hasher.write(file.to_str().unwrap_or("").as_bytes())
}
let mut hasher = DefaultHasher::new();
let files = self.files()?;
for file in files {
hasher.write(file.to_str().unwrap_or("").as_bytes())
}
Ok(hasher.finish())
}
Ok(hasher.finish())
}
fn last_modification_date(&self) -> Result<u64, Error> {
use std::time::{Duration, UNIX_EPOCH};
let duration = fs::metadata(&self.path)?.modified()?.duration_since(UNIX_EPOCH).unwrap_or(Duration::default());
let timestamp = duration.as_secs() ^ (duration.subsec_nanos() as u64);
Ok(timestamp)
}
fn last_modification_date(&self) -> Result<u64, Error> {
use std::time::{Duration, UNIX_EPOCH};
let duration = fs::metadata(&self.path)?
.modified()?
.duration_since(UNIX_EPOCH)
.unwrap_or(Duration::default());
let timestamp = duration.as_secs() ^ (duration.subsec_nanos() as u64);
Ok(timestamp)
}
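XOR-ing the whole seconds with the sub-second nanoseconds above yields a value that still changes when two modifications land within the same second; the same computation in isolation, with a placeholder timestamp:
// Sketch: combine seconds and sub-second nanos into one change-detection value.
use std::time::Duration;
let duration = Duration::new(1_596_600_000, 123_456_789); // placeholder modification time
let repr = duration.as_secs() ^ (duration.subsec_nanos() as u64);
assert_ne!(repr, duration.as_secs()); // differs whenever the nanosecond part is non-zero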
/// all accounts found in keys directory
fn files_content(&self) -> Result<HashMap<PathBuf, SafeAccount>, Error> {
// it's not done using one iterator because
// there is an issue with rustc and it takes too much time to compile
let paths = self.files()?;
Ok(paths
.into_iter()
.filter_map(|path| {
let filename = Some(path.file_name().and_then(|n| n.to_str()).expect("Keys have valid UTF8 names only.").to_owned());
fs::File::open(path.clone())
.map_err(Into::into)
.and_then(|file| self.key_manager.read(filename, file))
.map_err(|err| {
warn!("Invalid key file: {:?} ({})", path, err);
err
})
.map(|account| (path, account))
.ok()
})
.collect()
)
}
/// all accounts found in keys directory
fn files_content(&self) -> Result<HashMap<PathBuf, SafeAccount>, Error> {
// it's not done using one iterator because
// there is an issue with rustc and it takes too much time to compile
let paths = self.files()?;
Ok(paths
.into_iter()
.filter_map(|path| {
let filename = Some(
path.file_name()
.and_then(|n| n.to_str())
.expect("Keys have valid UTF8 names only.")
.to_owned(),
);
fs::File::open(path.clone())
.map_err(Into::into)
.and_then(|file| self.key_manager.read(filename, file))
.map_err(|err| {
warn!("Invalid key file: {:?} ({})", path, err);
err
})
.map(|account| (path, account))
.ok()
})
.collect())
}
/// Insert account with the given filename. If the filename is a duplicate of any stored account and dedup is set to
/// true, a random suffix is appended to the filename.
pub fn insert_with_filename(&self, account: SafeAccount, mut filename: String, dedup: bool) -> Result<SafeAccount, Error> {
if dedup {
filename = find_unique_filename_using_random_suffix(&self.path, &filename)?;
}
/// Insert account with the given filename. If the filename is a duplicate of any stored account and dedup is set to
/// true, a random suffix is appended to the filename.
pub fn insert_with_filename(
&self,
account: SafeAccount,
mut filename: String,
dedup: bool,
) -> Result<SafeAccount, Error> {
if dedup {
filename = find_unique_filename_using_random_suffix(&self.path, &filename)?;
}
// path to keyfile
let keyfile_path = self.path.join(filename.as_str());
// path to keyfile
let keyfile_path = self.path.join(filename.as_str());
// update account filename
let original_account = account.clone();
let mut account = account;
account.filename = Some(filename);
// update account filename
let original_account = account.clone();
let mut account = account;
account.filename = Some(filename);
{
// save the file
let mut file = if dedup {
create_new_file_with_permissions_to_owner(&keyfile_path)?
} else {
replace_file_with_permissions_to_owner(&keyfile_path)?
};
{
// save the file
let mut file = if dedup {
create_new_file_with_permissions_to_owner(&keyfile_path)?
} else {
replace_file_with_permissions_to_owner(&keyfile_path)?
};
// write key content
self.key_manager.write(original_account, &mut file).map_err(|e| Error::Custom(format!("{:?}", e)))?;
// write key content
self.key_manager
.write(original_account, &mut file)
.map_err(|e| Error::Custom(format!("{:?}", e)))?;
file.flush()?;
file.sync_all()?;
}
file.flush()?;
file.sync_all()?;
}
Ok(account)
}
Ok(account)
}
/// Get key file manager reference
pub fn key_manager(&self) -> &T {
&self.key_manager
}
/// Get key file manager reference
pub fn key_manager(&self) -> &T {
&self.key_manager
}
}
impl<T> KeyDirectory for DiskDirectory<T> where T: KeyFileManager {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
let accounts = self.files_content()?
.into_iter()
.map(|(_, account)| account)
.collect();
Ok(accounts)
}
impl<T> KeyDirectory for DiskDirectory<T>
where
T: KeyFileManager,
{
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
let accounts = self
.files_content()?
.into_iter()
.map(|(_, account)| account)
.collect();
Ok(accounts)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
// Disk store handles updates correctly iff filename is the same
let filename = account_filename(&account);
self.insert_with_filename(account, filename, false)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
// Disk store handles updates correctly iff filename is the same
let filename = account_filename(&account);
self.insert_with_filename(account, filename, false)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let filename = account_filename(&account);
self.insert_with_filename(account, filename, true)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let filename = account_filename(&account);
self.insert_with_filename(account, filename, true)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
// enumerate all entries in keystore
// and find entry with given address
let to_remove = self.files_content()?
.into_iter()
.find(|&(_, ref acc)| acc.id == account.id && acc.address == account.address);
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
// enumerate all entries in keystore
// and find entry with given address
let to_remove = self
.files_content()?
.into_iter()
.find(|&(_, ref acc)| acc.id == account.id && acc.address == account.address);
// remove it
match to_remove {
None => Err(Error::InvalidAccount),
Some((path, _)) => fs::remove_file(path).map_err(From::from)
}
}
// remove it
match to_remove {
None => Err(Error::InvalidAccount),
Some((path, _)) => fs::remove_file(path).map_err(From::from),
}
}
fn path(&self) -> Option<&PathBuf> { Some(&self.path) }
fn path(&self) -> Option<&PathBuf> {
Some(&self.path)
}
fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> {
Some(self)
}
fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> {
Some(self)
}
fn unique_repr(&self) -> Result<u64, Error> {
self.last_modification_date()
}
fn unique_repr(&self) -> Result<u64, Error> {
self.last_modification_date()
}
}
impl<T> VaultKeyDirectoryProvider for DiskDirectory<T> where T: KeyFileManager {
fn create(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
let vault_dir = VaultDiskDirectory::create(&self.path, name, key)?;
Ok(Box::new(vault_dir))
}
impl<T> VaultKeyDirectoryProvider for DiskDirectory<T>
where
T: KeyFileManager,
{
fn create(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
let vault_dir = VaultDiskDirectory::create(&self.path, name, key)?;
Ok(Box::new(vault_dir))
}
fn open(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
let vault_dir = VaultDiskDirectory::at(&self.path, name, key)?;
Ok(Box::new(vault_dir))
}
fn open(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error> {
let vault_dir = VaultDiskDirectory::at(&self.path, name, key)?;
Ok(Box::new(vault_dir))
}
fn list_vaults(&self) -> Result<Vec<String>, Error> {
Ok(fs::read_dir(&self.path)?
.filter_map(|e| e.ok().map(|e| e.path()))
.filter_map(|path| {
let mut vault_file_path = path.clone();
vault_file_path.push(VAULT_FILE_NAME);
if vault_file_path.is_file() {
path.file_name().and_then(|f| f.to_str()).map(|f| f.to_owned())
} else {
None
}
})
.collect())
}
fn list_vaults(&self) -> Result<Vec<String>, Error> {
Ok(fs::read_dir(&self.path)?
.filter_map(|e| e.ok().map(|e| e.path()))
.filter_map(|path| {
let mut vault_file_path = path.clone();
vault_file_path.push(VAULT_FILE_NAME);
if vault_file_path.is_file() {
path.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_owned())
} else {
None
}
})
.collect())
}
fn vault_meta(&self, name: &str) -> Result<String, Error> {
VaultDiskDirectory::meta_at(&self.path, name)
}
fn vault_meta(&self, name: &str) -> Result<String, Error> {
VaultDiskDirectory::meta_at(&self.path, name)
}
}
impl KeyFileManager for DiskKeyFileManager {
fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error> where T: io::Read {
let key_file = json::KeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?;
SafeAccount::from_file(key_file, filename, &self.password)
}
fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error>
where
T: io::Read,
{
let key_file =
json::KeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?;
SafeAccount::from_file(key_file, filename, &self.password)
}
fn write<T>(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write {
// when account is moved back to root directory from vault
// => remove vault field from meta
account.meta = json::remove_vault_name_from_json_meta(&account.meta)
.map_err(|err| Error::Custom(format!("{:?}", err)))?;
fn write<T>(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error>
where
T: io::Write,
{
// when account is moved back to root directory from vault
// => remove vault field from meta
account.meta = json::remove_vault_name_from_json_meta(&account.meta)
.map_err(|err| Error::Custom(format!("{:?}", err)))?;
let key_file: json::KeyFile = account.into();
key_file.write(writer).map_err(|e| Error::Custom(format!("{:?}", e)))
}
let key_file: json::KeyFile = account.into();
key_file
.write(writer)
.map_err(|e| Error::Custom(format!("{:?}", e)))
}
}
fn account_filename(account: &SafeAccount) -> String {
// build file path
account.filename.clone().unwrap_or_else(|| {
let timestamp = time::strftime("%Y-%m-%dT%H-%M-%S", &time::now_utc()).expect("Time-format string is valid.");
format!("UTC--{}Z--{}", timestamp, Uuid::from(account.id))
})
// build file path
account.filename.clone().unwrap_or_else(|| {
let timestamp = time::strftime("%Y-%m-%dT%H-%M-%S", &time::now_utc())
.expect("Time-format string is valid.");
format!("UTC--{}Z--{}", timestamp, Uuid::from(account.id))
})
}
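As a rough illustration of the default naming scheme above (the timestamp and id shown are placeholders, not real values):
// Sketch: an account stored without an explicit filename gets a sortable
// "UTC--<timestamp>Z--<uuid>" name, e.g. "UTC--2020-08-05T07-08-03Z--0902d04b-...".
let name = account_filename(&account);
assert!(name.starts_with("UTC--"));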
#[cfg(test)]
mod test {
extern crate tempdir;
extern crate tempdir;
use std::{env, fs};
use std::num::NonZeroU32;
use super::{KeyDirectory, RootDiskDirectory, VaultKey};
use account::SafeAccount;
use ethkey::{Random, Generator};
use self::tempdir::TempDir;
use self::tempdir::TempDir;
use super::{KeyDirectory, RootDiskDirectory, VaultKey};
use account::SafeAccount;
use ethkey::{Generator, Random};
use std::{env, fs, num::NonZeroU32};
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed");
}
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed");
}
#[test]
fn should_create_new_account() {
// given
let mut dir = env::temp_dir();
dir.push("ethstore_should_create_new_account");
let keypair = Random.generate().unwrap();
let password = "hello world".into();
let directory = RootDiskDirectory::create(dir.clone()).unwrap();
#[test]
fn should_create_new_account() {
// given
let mut dir = env::temp_dir();
dir.push("ethstore_should_create_new_account");
let keypair = Random.generate().unwrap();
let password = "hello world".into();
let directory = RootDiskDirectory::create(dir.clone()).unwrap();
// when
let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned());
let res = directory.insert(account.unwrap());
// when
let account = SafeAccount::create(
&keypair,
[0u8; 16],
&password,
*ITERATIONS,
"Test".to_owned(),
"{}".to_owned(),
);
let res = directory.insert(account.unwrap());
// then
assert!(res.is_ok(), "Should save account succesfuly.");
assert!(res.unwrap().filename.is_some(), "Filename has been assigned.");
// then
assert!(res.is_ok(), "Should save account succesfuly.");
assert!(
res.unwrap().filename.is_some(),
"Filename has been assigned."
);
// cleanup
let _ = fs::remove_dir_all(dir);
}
// cleanup
let _ = fs::remove_dir_all(dir);
}
#[test]
fn should_handle_duplicate_filenames() {
// given
let mut dir = env::temp_dir();
dir.push("ethstore_should_handle_duplicate_filenames");
let keypair = Random.generate().unwrap();
let password = "hello world".into();
let directory = RootDiskDirectory::create(dir.clone()).unwrap();
#[test]
fn should_handle_duplicate_filenames() {
// given
let mut dir = env::temp_dir();
dir.push("ethstore_should_handle_duplicate_filenames");
let keypair = Random.generate().unwrap();
let password = "hello world".into();
let directory = RootDiskDirectory::create(dir.clone()).unwrap();
// when
let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned()).unwrap();
let filename = "test".to_string();
let dedup = true;
// when
let account = SafeAccount::create(
&keypair,
[0u8; 16],
&password,
*ITERATIONS,
"Test".to_owned(),
"{}".to_owned(),
)
.unwrap();
let filename = "test".to_string();
let dedup = true;
directory.insert_with_filename(account.clone(), "foo".to_string(), dedup).unwrap();
let file1 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap();
let file2 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap();
let file3 = directory.insert_with_filename(account.clone(), filename.clone(), dedup).unwrap().filename.unwrap();
directory
.insert_with_filename(account.clone(), "foo".to_string(), dedup)
.unwrap();
let file1 = directory
.insert_with_filename(account.clone(), filename.clone(), dedup)
.unwrap()
.filename
.unwrap();
let file2 = directory
.insert_with_filename(account.clone(), filename.clone(), dedup)
.unwrap()
.filename
.unwrap();
let file3 = directory
.insert_with_filename(account.clone(), filename.clone(), dedup)
.unwrap()
.filename
.unwrap();
// then
// the first file should have the original name
assert_eq!(file1, filename);
// then
// the first file should have the original name
assert_eq!(file1, filename);
// the following duplicate files should have a suffix appended
assert!(file2 != file3);
assert_eq!(file2.len(), filename.len() + 5);
assert_eq!(file3.len(), filename.len() + 5);
// the following duplicate files should have a suffix appended
assert!(file2 != file3);
assert_eq!(file2.len(), filename.len() + 5);
assert_eq!(file3.len(), filename.len() + 5);
// cleanup
let _ = fs::remove_dir_all(dir);
}
// cleanup
let _ = fs::remove_dir_all(dir);
}
#[test]
fn should_manage_vaults() {
// given
let mut dir = env::temp_dir();
dir.push("should_create_new_vault");
let directory = RootDiskDirectory::create(dir.clone()).unwrap();
let vault_name = "vault";
let password = "password".into();
#[test]
fn should_manage_vaults() {
// given
let mut dir = env::temp_dir();
dir.push("should_create_new_vault");
let directory = RootDiskDirectory::create(dir.clone()).unwrap();
let vault_name = "vault";
let password = "password".into();
// then
assert!(directory.as_vault_provider().is_some());
// then
assert!(directory.as_vault_provider().is_some());
// and when
let before_root_items_count = fs::read_dir(&dir).unwrap().count();
let vault = directory.as_vault_provider().unwrap().create(vault_name, VaultKey::new(&password, *ITERATIONS));
// and when
let before_root_items_count = fs::read_dir(&dir).unwrap().count();
let vault = directory
.as_vault_provider()
.unwrap()
.create(vault_name, VaultKey::new(&password, *ITERATIONS));
// then
assert!(vault.is_ok());
let after_root_items_count = fs::read_dir(&dir).unwrap().count();
assert!(after_root_items_count > before_root_items_count);
// then
assert!(vault.is_ok());
let after_root_items_count = fs::read_dir(&dir).unwrap().count();
assert!(after_root_items_count > before_root_items_count);
// and when
let vault = directory.as_vault_provider().unwrap().open(vault_name, VaultKey::new(&password, *ITERATIONS));
// and when
let vault = directory
.as_vault_provider()
.unwrap()
.open(vault_name, VaultKey::new(&password, *ITERATIONS));
// then
assert!(vault.is_ok());
let after_root_items_count2 = fs::read_dir(&dir).unwrap().count();
assert!(after_root_items_count == after_root_items_count2);
// then
assert!(vault.is_ok());
let after_root_items_count2 = fs::read_dir(&dir).unwrap().count();
assert!(after_root_items_count == after_root_items_count2);
// cleanup
let _ = fs::remove_dir_all(dir);
}
// cleanup
let _ = fs::remove_dir_all(dir);
}
#[test]
fn should_list_vaults() {
// given
let temp_path = TempDir::new("").unwrap();
let directory = RootDiskDirectory::create(&temp_path).unwrap();
let vault_provider = directory.as_vault_provider().unwrap();
let iter = NonZeroU32::new(1).expect("1 > 0; qed");
vault_provider.create("vault1", VaultKey::new(&"password1".into(), iter)).unwrap();
vault_provider.create("vault2", VaultKey::new(&"password2".into(), iter)).unwrap();
#[test]
fn should_list_vaults() {
// given
let temp_path = TempDir::new("").unwrap();
let directory = RootDiskDirectory::create(&temp_path).unwrap();
let vault_provider = directory.as_vault_provider().unwrap();
let iter = NonZeroU32::new(1).expect("1 > 0; qed");
vault_provider
.create("vault1", VaultKey::new(&"password1".into(), iter))
.unwrap();
vault_provider
.create("vault2", VaultKey::new(&"password2".into(), iter))
.unwrap();
// then
let vaults = vault_provider.list_vaults().unwrap();
assert_eq!(vaults.len(), 2);
assert!(vaults.iter().any(|v| &*v == "vault1"));
assert!(vaults.iter().any(|v| &*v == "vault2"));
}
// then
let vaults = vault_provider.list_vaults().unwrap();
assert_eq!(vaults.len(), 2);
assert!(vaults.iter().any(|v| &*v == "vault1"));
assert!(vaults.iter().any(|v| &*v == "vault2"));
}
#[test]
fn hash_of_files() {
let temp_path = TempDir::new("").unwrap();
let directory = RootDiskDirectory::create(&temp_path).unwrap();
#[test]
fn hash_of_files() {
let temp_path = TempDir::new("").unwrap();
let directory = RootDiskDirectory::create(&temp_path).unwrap();
let hash = directory.files_hash().expect("Files hash should be calculated ok");
assert_eq!(
hash,
15130871412783076140
);
let hash = directory
.files_hash()
.expect("Files hash should be calculated ok");
assert_eq!(hash, 15130871412783076140);
let keypair = Random.generate().unwrap();
let password = "test pass".into();
let account = SafeAccount::create(&keypair, [0u8; 16], &password, *ITERATIONS, "Test".to_owned(), "{}".to_owned());
directory.insert(account.unwrap()).expect("Account should be inserted ok");
let keypair = Random.generate().unwrap();
let password = "test pass".into();
let account = SafeAccount::create(
&keypair,
[0u8; 16],
&password,
*ITERATIONS,
"Test".to_owned(),
"{}".to_owned(),
);
directory
.insert(account.unwrap())
.expect("Account should be inserted ok");
let new_hash = directory.files_hash().expect("New files hash should be calculated ok");
let new_hash = directory
.files_hash()
.expect("New files hash should be calculated ok");
assert!(new_hash != hash, "hash of the file list should change once directory content changed");
}
assert!(
new_hash != hash,
"hash of the file list should change once directory content changed"
);
}
}

View File

@ -14,61 +14,64 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashMap;
use parking_lot::RwLock;
use itertools;
use ethkey::Address;
use itertools;
use parking_lot::RwLock;
use std::collections::HashMap;
use {SafeAccount, Error};
use super::KeyDirectory;
use Error;
use SafeAccount;
/// Accounts in-memory storage.
#[derive(Default)]
pub struct MemoryDirectory {
accounts: RwLock<HashMap<Address, Vec<SafeAccount>>>,
accounts: RwLock<HashMap<Address, Vec<SafeAccount>>>,
}
impl KeyDirectory for MemoryDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
Ok(itertools::Itertools::flatten(self.accounts.read().values().cloned()).collect())
}
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
Ok(itertools::Itertools::flatten(self.accounts.read().values().cloned()).collect())
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let mut lock = self.accounts.write();
let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new);
// If the filename is the same we just need to replace the entry
accounts.retain(|acc| acc.filename != account.filename);
accounts.push(account.clone());
Ok(account)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let mut lock = self.accounts.write();
let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new);
// If the filename is the same we just need to replace the entry
accounts.retain(|acc| acc.filename != account.filename);
accounts.push(account.clone());
Ok(account)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let mut lock = self.accounts.write();
let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new);
accounts.push(account.clone());
Ok(account)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let mut lock = self.accounts.write();
let accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new);
accounts.push(account.clone());
Ok(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
let mut accounts = self.accounts.write();
let is_empty = if let Some(accounts) = accounts.get_mut(&account.address) {
if let Some(position) = accounts.iter().position(|acc| acc == account) {
accounts.remove(position);
}
accounts.is_empty()
} else {
false
};
if is_empty {
accounts.remove(&account.address);
}
Ok(())
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
let mut accounts = self.accounts.write();
let is_empty = if let Some(accounts) = accounts.get_mut(&account.address) {
if let Some(position) = accounts.iter().position(|acc| acc == account) {
accounts.remove(position);
}
accounts.is_empty()
} else {
false
};
if is_empty {
accounts.remove(&account.address);
}
Ok(())
}
fn unique_repr(&self) -> Result<u64, Error> {
let mut val = 0u64;
let accounts = self.accounts.read();
for acc in accounts.keys() { val = val ^ acc.low_u64() }
Ok(val)
}
fn unique_repr(&self) -> Result<u64, Error> {
let mut val = 0u64;
let accounts = self.accounts.read();
for acc in accounts.keys() {
val = val ^ acc.low_u64()
}
Ok(val)
}
}
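A hedged usage sketch of the in-memory directory above; `account` is assumed to be a SafeAccount built elsewhere:
// Sketch: MemoryDirectory implements KeyDirectory, so the same
// insert / load / remove flow as the disk directory applies.
let dir = MemoryDirectory::default();
let stored = dir.insert(account.clone())?;
assert_eq!(dir.load()?.len(), 1);
dir.remove(&stored)?;
assert!(dir.load()?.is_empty());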

View File

@ -17,9 +17,9 @@
//! Accounts Directory
use ethkey::Password;
use std::num::NonZeroU32;
use std::path::{PathBuf};
use {SafeAccount, Error};
use std::{num::NonZeroU32, path::PathBuf};
use Error;
use SafeAccount;
mod disk;
mod memory;
@ -28,79 +28,85 @@ mod vault;
/// `VaultKeyDirectory::set_key` error
#[derive(Debug)]
pub enum SetKeyError {
/// Error is fatal and directory is probably in inconsistent state
Fatal(Error),
/// Error is non fatal, directory is reverted to pre-operation state
NonFatalOld(Error),
/// Error is non fatal, directory is consistent with new key
NonFatalNew(Error),
/// Error is fatal and directory is probably in inconsistent state
Fatal(Error),
/// Error is non fatal, directory is reverted to pre-operation state
NonFatalOld(Error),
/// Error is non fatal, directory is consistent with new key
NonFatalNew(Error),
}
/// Vault key
#[derive(Clone, PartialEq, Eq)]
pub struct VaultKey {
/// Vault password
pub password: Password,
/// Number of iterations to produce a derived key from password
pub iterations: NonZeroU32,
/// Vault password
pub password: Password,
/// Number of iterations to produce a derived key from password
pub iterations: NonZeroU32,
}
/// Keys directory
pub trait KeyDirectory: Send + Sync {
/// Read keys from directory
fn load(&self) -> Result<Vec<SafeAccount>, Error>;
/// Insert new key to directory
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error>;
/// Update key in the directory
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error>;
/// Remove key from directory
fn remove(&self, account: &SafeAccount) -> Result<(), Error>;
/// Get directory filesystem path, if available
fn path(&self) -> Option<&PathBuf> { None }
/// Return vault provider, if available
fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> { None }
/// Unique representation of directory account collection
fn unique_repr(&self) -> Result<u64, Error>;
/// Read keys from directory
fn load(&self) -> Result<Vec<SafeAccount>, Error>;
/// Insert new key to directory
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error>;
/// Update key in the directory
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error>;
/// Remove key from directory
fn remove(&self, account: &SafeAccount) -> Result<(), Error>;
/// Get directory filesystem path, if available
fn path(&self) -> Option<&PathBuf> {
None
}
/// Return vault provider, if available
fn as_vault_provider(&self) -> Option<&VaultKeyDirectoryProvider> {
None
}
/// Unique representation of directory account collection
fn unique_repr(&self) -> Result<u64, Error>;
}
/// Vaults provider
pub trait VaultKeyDirectoryProvider {
/// Create new vault with given key
fn create(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error>;
/// Open existing vault with given key
fn open(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error>;
/// List all vaults
fn list_vaults(&self) -> Result<Vec<String>, Error>;
/// Get vault meta
fn vault_meta(&self, name: &str) -> Result<String, Error>;
/// Create new vault with given key
fn create(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error>;
/// Open existing vault with given key
fn open(&self, name: &str, key: VaultKey) -> Result<Box<VaultKeyDirectory>, Error>;
/// List all vaults
fn list_vaults(&self) -> Result<Vec<String>, Error>;
/// Get vault meta
fn vault_meta(&self, name: &str) -> Result<String, Error>;
}
/// Vault directory
pub trait VaultKeyDirectory: KeyDirectory {
/// Cast to `KeyDirectory`
fn as_key_directory(&self) -> &KeyDirectory;
/// Vault name
fn name(&self) -> &str;
/// Get vault key
fn key(&self) -> VaultKey;
/// Set new key for vault
fn set_key(&self, key: VaultKey) -> Result<(), SetKeyError>;
/// Get vault meta
fn meta(&self) -> String;
/// Set vault meta
fn set_meta(&self, meta: &str) -> Result<(), Error>;
/// Cast to `KeyDirectory`
fn as_key_directory(&self) -> &KeyDirectory;
/// Vault name
fn name(&self) -> &str;
/// Get vault key
fn key(&self) -> VaultKey;
/// Set new key for vault
fn set_key(&self, key: VaultKey) -> Result<(), SetKeyError>;
/// Get vault meta
fn meta(&self) -> String;
/// Set vault meta
fn set_meta(&self, meta: &str) -> Result<(), Error>;
}
pub use self::disk::{RootDiskDirectory, DiskKeyFileManager, KeyFileManager};
pub use self::memory::MemoryDirectory;
pub use self::vault::VaultDiskDirectory;
pub use self::{
disk::{DiskKeyFileManager, KeyFileManager, RootDiskDirectory},
memory::MemoryDirectory,
vault::VaultDiskDirectory,
};
impl VaultKey {
/// Create new vault key
pub fn new(password: &Password, iterations: NonZeroU32) -> Self {
VaultKey {
password: password.clone(),
iterations: iterations,
}
}
/// Create new vault key
pub fn new(password: &Password, iterations: NonZeroU32) -> Self {
VaultKey {
password: password.clone(),
iterations: iterations,
}
}
}
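A sketch of how the traits above compose: a concrete directory is driven through KeyDirectory, and vault support is discovered at runtime via as_vault_provider (directory construction and `iterations` are assumed):
// Sketch: vault support is optional per KeyDirectory implementation.
let dir: &KeyDirectory = &directory;
if let Some(provider) = dir.as_vault_provider() {
    let vault = provider.create("vault", VaultKey::new(&"password".into(), iterations))?;
    println!("created vault {}", vault.name());
}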

View File

@ -14,14 +14,20 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::{fs, io};
use std::path::{PathBuf, Path};
use parking_lot::Mutex;
use {json, SafeAccount, Error};
use super::{
super::account::Crypto,
disk::{self, DiskDirectory, KeyFileManager},
KeyDirectory, SetKeyError, VaultKey, VaultKeyDirectory,
};
use crypto::Keccak256;
use super::super::account::Crypto;
use super::{KeyDirectory, VaultKeyDirectory, VaultKey, SetKeyError};
use super::disk::{self, DiskDirectory, KeyFileManager};
use json;
use parking_lot::Mutex;
use std::{
fs, io,
path::{Path, PathBuf},
};
use Error;
use SafeAccount;
/// Name of vault metadata file
pub const VAULT_FILE_NAME: &'static str = "vault.json";
@ -33,417 +39,489 @@ pub type VaultDiskDirectory = DiskDirectory<VaultKeyFileManager>;
/// Vault key file manager
pub struct VaultKeyFileManager {
name: String,
key: VaultKey,
meta: Mutex<String>,
name: String,
key: VaultKey,
meta: Mutex<String>,
}
impl VaultDiskDirectory {
/// Create new vault directory with given key
pub fn create<P>(root: P, name: &str, key: VaultKey) -> Result<Self, Error> where P: AsRef<Path> {
// check that vault directory does not exist
let vault_dir_path = make_vault_dir_path(root, name, true)?;
if vault_dir_path.exists() {
return Err(Error::CreationFailed);
}
/// Create new vault directory with given key
pub fn create<P>(root: P, name: &str, key: VaultKey) -> Result<Self, Error>
where
P: AsRef<Path>,
{
// check that vault directory does not exist
let vault_dir_path = make_vault_dir_path(root, name, true)?;
if vault_dir_path.exists() {
return Err(Error::CreationFailed);
}
// create vault && vault file
let vault_meta = "{}";
fs::create_dir_all(&vault_dir_path)?;
if let Err(err) = create_vault_file(&vault_dir_path, &key, vault_meta) {
let _ = fs::remove_dir_all(&vault_dir_path); // can't do anything with this
return Err(err);
}
// create vault && vault file
let vault_meta = "{}";
fs::create_dir_all(&vault_dir_path)?;
if let Err(err) = create_vault_file(&vault_dir_path, &key, vault_meta) {
let _ = fs::remove_dir_all(&vault_dir_path); // can't do anything with this
return Err(err);
}
Ok(DiskDirectory::new(vault_dir_path, VaultKeyFileManager::new(name, key, vault_meta)))
}
Ok(DiskDirectory::new(
vault_dir_path,
VaultKeyFileManager::new(name, key, vault_meta),
))
}
/// Open existing vault directory with given key
pub fn at<P>(root: P, name: &str, key: VaultKey) -> Result<Self, Error> where P: AsRef<Path> {
// check that vault directory exists
let vault_dir_path = make_vault_dir_path(root, name, true)?;
if !vault_dir_path.is_dir() {
return Err(Error::CreationFailed);
}
/// Open existing vault directory with given key
pub fn at<P>(root: P, name: &str, key: VaultKey) -> Result<Self, Error>
where
P: AsRef<Path>,
{
// check that vault directory exists
let vault_dir_path = make_vault_dir_path(root, name, true)?;
if !vault_dir_path.is_dir() {
return Err(Error::CreationFailed);
}
// check that passed key matches vault file
let meta = read_vault_file(&vault_dir_path, Some(&key))?;
// check that passed key matches vault file
let meta = read_vault_file(&vault_dir_path, Some(&key))?;
Ok(DiskDirectory::new(vault_dir_path, VaultKeyFileManager::new(name, key, &meta)))
}
Ok(DiskDirectory::new(
vault_dir_path,
VaultKeyFileManager::new(name, key, &meta),
))
}
/// Read vault meta without actually opening the vault
pub fn meta_at<P>(root: P, name: &str) -> Result<String, Error> where P: AsRef<Path> {
// check that vault directory exists
let vault_dir_path = make_vault_dir_path(root, name, true)?;
if !vault_dir_path.is_dir() {
return Err(Error::VaultNotFound);
}
/// Read vault meta without actually opening the vault
pub fn meta_at<P>(root: P, name: &str) -> Result<String, Error>
where
P: AsRef<Path>,
{
// check that vault directory exists
let vault_dir_path = make_vault_dir_path(root, name, true)?;
if !vault_dir_path.is_dir() {
return Err(Error::VaultNotFound);
}
// check that passed key matches vault file
read_vault_file(&vault_dir_path, None)
}
// check that passed key matches vault file
read_vault_file(&vault_dir_path, None)
}
fn create_temp_vault(&self, key: VaultKey) -> Result<VaultDiskDirectory, Error> {
let original_path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed");
let mut path: PathBuf = original_path.clone();
let name = self.name();
fn create_temp_vault(&self, key: VaultKey) -> Result<VaultDiskDirectory, Error> {
let original_path = self
.path()
.expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed");
let mut path: PathBuf = original_path.clone();
let name = self.name();
path.push(name); // to jump to the next level
path.push(name); // to jump to the next level
let mut index = 0;
loop {
let name = format!("{}_temp_{}", name, index);
path.set_file_name(&name);
if !path.exists() {
return VaultDiskDirectory::create(original_path, &name, key);
}
let mut index = 0;
loop {
let name = format!("{}_temp_{}", name, index);
path.set_file_name(&name);
if !path.exists() {
return VaultDiskDirectory::create(original_path, &name, key);
}
index += 1;
}
}
index += 1;
}
}
fn copy_to_vault(&self, vault: &VaultDiskDirectory) -> Result<(), Error> {
for account in self.load()? {
let filename = account.filename.clone().expect("self is instance of DiskDirectory; DiskDirectory fills filename in load; qed");
vault.insert_with_filename(account, filename, true)?;
}
fn copy_to_vault(&self, vault: &VaultDiskDirectory) -> Result<(), Error> {
for account in self.load()? {
let filename = account.filename.clone().expect(
"self is instance of DiskDirectory; DiskDirectory fills filename in load; qed",
);
vault.insert_with_filename(account, filename, true)?;
}
Ok(())
}
Ok(())
}
fn delete(&self) -> Result<(), Error> {
let path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed");
fs::remove_dir_all(path).map_err(Into::into)
}
fn delete(&self) -> Result<(), Error> {
let path = self
.path()
.expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed");
fs::remove_dir_all(path).map_err(Into::into)
}
}
impl VaultKeyDirectory for VaultDiskDirectory {
fn as_key_directory(&self) -> &KeyDirectory {
self
}
fn as_key_directory(&self) -> &KeyDirectory {
self
}
fn name(&self) -> &str {
&self.key_manager().name
}
fn name(&self) -> &str {
&self.key_manager().name
}
fn key(&self) -> VaultKey {
self.key_manager().key.clone()
}
fn key(&self) -> VaultKey {
self.key_manager().key.clone()
}
fn set_key(&self, new_key: VaultKey) -> Result<(), SetKeyError> {
let temp_vault = VaultDiskDirectory::create_temp_vault(self, new_key.clone()).map_err(|err| SetKeyError::NonFatalOld(err))?;
let mut source_path = temp_vault.path().expect("temp_vault is instance of DiskDirectory; DiskDirectory always returns path; qed").clone();
let mut target_path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed").clone();
fn set_key(&self, new_key: VaultKey) -> Result<(), SetKeyError> {
let temp_vault = VaultDiskDirectory::create_temp_vault(self, new_key.clone())
.map_err(|err| SetKeyError::NonFatalOld(err))?;
let mut source_path = temp_vault
.path()
.expect(
"temp_vault is instance of DiskDirectory; DiskDirectory always returns path; qed",
)
.clone();
let mut target_path = self
.path()
.expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed")
.clone();
// preserve meta
temp_vault.set_meta(&self.meta()).map_err(SetKeyError::NonFatalOld)?;
// preserve meta
temp_vault
.set_meta(&self.meta())
.map_err(SetKeyError::NonFatalOld)?;
// jump to next fs level
source_path.push("next");
target_path.push("next");
// jump to next fs level
source_path.push("next");
target_path.push("next");
let temp_accounts = self.copy_to_vault(&temp_vault)
.and_then(|_| temp_vault.load())
.map_err(|err| {
// ignore error, as we are already processing an error
let _ = temp_vault.delete();
SetKeyError::NonFatalOld(err)
})?;
let temp_accounts = self
.copy_to_vault(&temp_vault)
.and_then(|_| temp_vault.load())
.map_err(|err| {
// ignore error, as we are already processing an error
let _ = temp_vault.delete();
SetKeyError::NonFatalOld(err)
})?;
// we can't just delete temp vault until all files moved, because
// original vault content has already been partially replaced
// => when error or crash happens here, we can't do anything
for temp_account in temp_accounts {
let filename = temp_account.filename.expect("self is instance of DiskDirectory; DiskDirectory fills filename in load; qed");
source_path.set_file_name(&filename);
target_path.set_file_name(&filename);
fs::rename(&source_path, &target_path).map_err(|err| SetKeyError::Fatal(err.into()))?;
}
source_path.set_file_name(VAULT_FILE_NAME);
target_path.set_file_name(VAULT_FILE_NAME);
fs::rename(source_path, target_path).map_err(|err| SetKeyError::Fatal(err.into()))?;
// we can't just delete temp vault until all files moved, because
// original vault content has already been partially replaced
// => when error or crash happens here, we can't do anything
for temp_account in temp_accounts {
let filename = temp_account.filename.expect(
"self is instance of DiskDirectory; DiskDirectory fills filename in load; qed",
);
source_path.set_file_name(&filename);
target_path.set_file_name(&filename);
fs::rename(&source_path, &target_path).map_err(|err| SetKeyError::Fatal(err.into()))?;
}
source_path.set_file_name(VAULT_FILE_NAME);
target_path.set_file_name(VAULT_FILE_NAME);
fs::rename(source_path, target_path).map_err(|err| SetKeyError::Fatal(err.into()))?;
temp_vault.delete().map_err(|err| SetKeyError::NonFatalNew(err))
}
temp_vault
.delete()
.map_err(|err| SetKeyError::NonFatalNew(err))
}
fn meta(&self) -> String {
self.key_manager().meta.lock().clone()
}
fn meta(&self) -> String {
self.key_manager().meta.lock().clone()
}
fn set_meta(&self, meta: &str) -> Result<(), Error> {
let key_manager = self.key_manager();
let vault_path = self.path().expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed");
create_vault_file(vault_path, &key_manager.key, meta)?;
*key_manager.meta.lock() = meta.to_owned();
Ok(())
}
fn set_meta(&self, meta: &str) -> Result<(), Error> {
let key_manager = self.key_manager();
let vault_path = self
.path()
.expect("self is instance of DiskDirectory; DiskDirectory always returns path; qed");
create_vault_file(vault_path, &key_manager.key, meta)?;
*key_manager.meta.lock() = meta.to_owned();
Ok(())
}
}
impl VaultKeyFileManager {
pub fn new(name: &str, key: VaultKey, meta: &str) -> Self {
VaultKeyFileManager {
name: name.into(),
key: key,
meta: Mutex::new(meta.to_owned()),
}
}
pub fn new(name: &str, key: VaultKey, meta: &str) -> Self {
VaultKeyFileManager {
name: name.into(),
key: key,
meta: Mutex::new(meta.to_owned()),
}
}
}
impl KeyFileManager for VaultKeyFileManager {
fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error> where T: io::Read {
let vault_file = json::VaultKeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?;
let mut safe_account = SafeAccount::from_vault_file(&self.key.password, vault_file, filename.clone())?;
fn read<T>(&self, filename: Option<String>, reader: T) -> Result<SafeAccount, Error>
where
T: io::Read,
{
let vault_file =
json::VaultKeyFile::load(reader).map_err(|e| Error::Custom(format!("{:?}", e)))?;
let mut safe_account =
SafeAccount::from_vault_file(&self.key.password, vault_file, filename.clone())?;
safe_account.meta = json::insert_vault_name_to_json_meta(&safe_account.meta, &self.name)
.map_err(|err| Error::Custom(format!("{:?}", err)))?;
Ok(safe_account)
}
safe_account.meta = json::insert_vault_name_to_json_meta(&safe_account.meta, &self.name)
.map_err(|err| Error::Custom(format!("{:?}", err)))?;
Ok(safe_account)
}
fn write<T>(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error> where T: io::Write {
account.meta = json::remove_vault_name_from_json_meta(&account.meta)
.map_err(|err| Error::Custom(format!("{:?}", err)))?;
fn write<T>(&self, mut account: SafeAccount, writer: &mut T) -> Result<(), Error>
where
T: io::Write,
{
account.meta = json::remove_vault_name_from_json_meta(&account.meta)
.map_err(|err| Error::Custom(format!("{:?}", err)))?;
let vault_file: json::VaultKeyFile = account.into_vault_file(self.key.iterations, &self.key.password)?;
vault_file.write(writer).map_err(|e| Error::Custom(format!("{:?}", e)))
}
let vault_file: json::VaultKeyFile =
account.into_vault_file(self.key.iterations, &self.key.password)?;
vault_file
.write(writer)
.map_err(|e| Error::Custom(format!("{:?}", e)))
}
}
/// Makes path to vault directory, checking that vault name is appropriate
fn make_vault_dir_path<P>(root: P, name: &str, check_name: bool) -> Result<PathBuf, Error> where P: AsRef<Path> {
// check vault name
if check_name && !check_vault_name(name) {
return Err(Error::InvalidVaultName);
}
fn make_vault_dir_path<P>(root: P, name: &str, check_name: bool) -> Result<PathBuf, Error>
where
P: AsRef<Path>,
{
// check vault name
if check_name && !check_vault_name(name) {
return Err(Error::InvalidVaultName);
}
let mut vault_dir_path: PathBuf = root.as_ref().into();
vault_dir_path.push(name);
Ok(vault_dir_path)
let mut vault_dir_path: PathBuf = root.as_ref().into();
vault_dir_path.push(name);
Ok(vault_dir_path)
}
/// Every vault must have a unique name => we rely on the filesystem to check this
/// => vault name must not contain any fs-special characters to avoid directory traversal
/// => we only allow alphanumeric + separator characters in vault name.
fn check_vault_name(name: &str) -> bool {
!name.is_empty()
&& name.chars()
.all(|c| c.is_alphanumeric()
|| c.is_whitespace()
|| c == '-' || c == '_')
!name.is_empty()
&& name
.chars()
.all(|c| c.is_alphanumeric() || c.is_whitespace() || c == '-' || c == '_')
}
/// A vault can be empty, but still must be pluggable => we store the vault password in a separate file
fn create_vault_file<P>(vault_dir_path: P, key: &VaultKey, meta: &str) -> Result<(), Error> where P: AsRef<Path> {
let password_hash = key.password.as_bytes().keccak256();
let crypto = Crypto::with_plain(&password_hash, &key.password, key.iterations)?;
fn create_vault_file<P>(vault_dir_path: P, key: &VaultKey, meta: &str) -> Result<(), Error>
where
P: AsRef<Path>,
{
let password_hash = key.password.as_bytes().keccak256();
let crypto = Crypto::with_plain(&password_hash, &key.password, key.iterations)?;
let vault_file_path = vault_dir_path.as_ref().join(VAULT_FILE_NAME);
let temp_vault_file_name = disk::find_unique_filename_using_random_suffix(vault_dir_path.as_ref(), &VAULT_TEMP_FILE_NAME)?;
let temp_vault_file_path = vault_dir_path.as_ref().join(&temp_vault_file_name);
let vault_file_path = vault_dir_path.as_ref().join(VAULT_FILE_NAME);
let temp_vault_file_name = disk::find_unique_filename_using_random_suffix(
vault_dir_path.as_ref(),
&VAULT_TEMP_FILE_NAME,
)?;
let temp_vault_file_path = vault_dir_path.as_ref().join(&temp_vault_file_name);
// this method is used to rewrite existing vault file
// => write to temporary file first, then rename temporary file to vault file
let mut vault_file = disk::create_new_file_with_permissions_to_owner(&temp_vault_file_path)?;
let vault_file_contents = json::VaultFile {
crypto: crypto.into(),
meta: Some(meta.to_owned()),
};
vault_file_contents.write(&mut vault_file).map_err(|e| Error::Custom(format!("{:?}", e)))?;
drop(vault_file);
fs::rename(&temp_vault_file_path, &vault_file_path)?;
// this method is used to rewrite existing vault file
// => write to temporary file first, then rename temporary file to vault file
let mut vault_file = disk::create_new_file_with_permissions_to_owner(&temp_vault_file_path)?;
let vault_file_contents = json::VaultFile {
crypto: crypto.into(),
meta: Some(meta.to_owned()),
};
vault_file_contents
.write(&mut vault_file)
.map_err(|e| Error::Custom(format!("{:?}", e)))?;
drop(vault_file);
fs::rename(&temp_vault_file_path, &vault_file_path)?;
Ok(())
Ok(())
}
/// When vault is opened => we must check that password matches && read metadata
fn read_vault_file<P>(vault_dir_path: P, key: Option<&VaultKey>) -> Result<String, Error> where P: AsRef<Path> {
let mut vault_file_path: PathBuf = vault_dir_path.as_ref().into();
vault_file_path.push(VAULT_FILE_NAME);
fn read_vault_file<P>(vault_dir_path: P, key: Option<&VaultKey>) -> Result<String, Error>
where
P: AsRef<Path>,
{
let mut vault_file_path: PathBuf = vault_dir_path.as_ref().into();
vault_file_path.push(VAULT_FILE_NAME);
let vault_file = fs::File::open(vault_file_path)?;
let vault_file_contents = json::VaultFile::load(vault_file).map_err(|e| Error::Custom(format!("{:?}", e)))?;
let vault_file_meta = vault_file_contents.meta.unwrap_or("{}".to_owned());
let vault_file_crypto: Crypto = vault_file_contents.crypto.into();
let vault_file = fs::File::open(vault_file_path)?;
let vault_file_contents =
json::VaultFile::load(vault_file).map_err(|e| Error::Custom(format!("{:?}", e)))?;
let vault_file_meta = vault_file_contents.meta.unwrap_or("{}".to_owned());
let vault_file_crypto: Crypto = vault_file_contents.crypto.into();
if let Some(key) = key {
let password_bytes = vault_file_crypto.decrypt(&key.password)?;
let password_hash = key.password.as_bytes().keccak256();
if password_hash != password_bytes.as_slice() {
return Err(Error::InvalidPassword);
}
}
if let Some(key) = key {
let password_bytes = vault_file_crypto.decrypt(&key.password)?;
let password_hash = key.password.as_bytes().keccak256();
if password_hash != password_bytes.as_slice() {
return Err(Error::InvalidPassword);
}
}
Ok(vault_file_meta)
Ok(vault_file_meta)
}
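Taken together, create_vault_file and read_vault_file form a small password check: the keccak256 hash of the vault password is encrypted under that same password and written to vault.json, and on open the decrypted bytes must match a freshly computed hash. A hedged round-trip sketch, assuming `vault_dir` already exists:
// Sketch: write vault.json for a new vault, then verify the password on open.
use std::num::NonZeroU32;
let key = VaultKey::new(&"password".into(), NonZeroU32::new(1024).expect("1024 > 0; qed"));
create_vault_file(&vault_dir, &key, "{}")?;
let meta = read_vault_file(&vault_dir, Some(&key))?; // a wrong key yields Error::InvalidPassword
assert_eq!(meta, "{}");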
#[cfg(test)]
mod test {
extern crate tempdir;
extern crate tempdir;
use std::fs;
use std::io::Write;
use std::num::NonZeroU32;
use std::path::PathBuf;
use super::VaultKey;
use super::{VAULT_FILE_NAME, check_vault_name, make_vault_dir_path, create_vault_file, read_vault_file, VaultDiskDirectory};
use self::tempdir::TempDir;
use self::tempdir::TempDir;
use super::{
check_vault_name, create_vault_file, make_vault_dir_path, read_vault_file,
VaultDiskDirectory, VaultKey, VAULT_FILE_NAME,
};
use std::{fs, io::Write, num::NonZeroU32, path::PathBuf};
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed");
}
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed");
}
#[test]
fn check_vault_name_succeeds() {
assert!(check_vault_name("vault"));
assert!(check_vault_name("vault with spaces"));
assert!(check_vault_name("vault with tabs"));
assert!(check_vault_name("vault_with_underscores"));
assert!(check_vault_name("vault-with-dashes"));
assert!(check_vault_name("vault-with-digits-123"));
assert!(check_vault_name("vault中文名字"));
}
#[test]
fn check_vault_name_succeeds() {
assert!(check_vault_name("vault"));
assert!(check_vault_name("vault with spaces"));
assert!(check_vault_name("vault with tabs"));
assert!(check_vault_name("vault_with_underscores"));
assert!(check_vault_name("vault-with-dashes"));
assert!(check_vault_name("vault-with-digits-123"));
assert!(check_vault_name("vault中文名字"));
}
#[test]
fn check_vault_name_fails() {
assert!(!check_vault_name(""));
assert!(!check_vault_name("."));
assert!(!check_vault_name("*"));
assert!(!check_vault_name("../.bash_history"));
assert!(!check_vault_name("/etc/passwd"));
assert!(!check_vault_name("c:\\windows"));
}
#[test]
fn check_vault_name_fails() {
assert!(!check_vault_name(""));
assert!(!check_vault_name("."));
assert!(!check_vault_name("*"));
assert!(!check_vault_name("../.bash_history"));
assert!(!check_vault_name("/etc/passwd"));
assert!(!check_vault_name("c:\\windows"));
}
#[test]
fn make_vault_dir_path_succeeds() {
use std::path::Path;
#[test]
fn make_vault_dir_path_succeeds() {
use std::path::Path;
assert_eq!(
&make_vault_dir_path("/home/user/parity", "vault", true).unwrap(),
&Path::new("/home/user/parity/vault")
);
assert_eq!(
&make_vault_dir_path("/home/user/parity", "*bad-name*", false).unwrap(),
&Path::new("/home/user/parity/*bad-name*")
);
}
assert_eq!(&make_vault_dir_path("/home/user/parity", "vault", true).unwrap(), &Path::new("/home/user/parity/vault"));
assert_eq!(&make_vault_dir_path("/home/user/parity", "*bad-name*", false).unwrap(), &Path::new("/home/user/parity/*bad-name*"));
}
#[test]
fn make_vault_dir_path_fails() {
assert!(make_vault_dir_path("/home/user/parity", "*bad-name*", true).is_err());
}
#[test]
fn make_vault_dir_path_fails() {
assert!(make_vault_dir_path("/home/user/parity", "*bad-name*", true).is_err());
}
#[test]
fn create_vault_file_succeeds() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let mut vault_dir: PathBuf = temp_path.path().into();
vault_dir.push("vault");
fs::create_dir_all(&vault_dir).unwrap();
#[test]
fn create_vault_file_succeeds() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let mut vault_dir: PathBuf = temp_path.path().into();
vault_dir.push("vault");
fs::create_dir_all(&vault_dir).unwrap();
// when
let result = create_vault_file(&vault_dir, &key, "{}");
// when
let result = create_vault_file(&vault_dir, &key, "{}");
// then
assert!(result.is_ok());
let mut vault_file_path = vault_dir.clone();
vault_file_path.push(VAULT_FILE_NAME);
assert!(vault_file_path.exists() && vault_file_path.is_file());
}
// then
assert!(result.is_ok());
let mut vault_file_path = vault_dir.clone();
vault_file_path.push(VAULT_FILE_NAME);
assert!(vault_file_path.exists() && vault_file_path.is_file());
}
#[test]
fn read_vault_file_succeeds() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let vault_file_contents = r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"758696c8dc6378ab9b25bb42790da2f5"},"ciphertext":"54eb50683717d41caaeb12ea969f2c159daada5907383f26f327606a37dc7168","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"3c320fa566a1a7963ac8df68a19548d27c8f40bf92ef87c84594dcd5bbc402b6"},"mac":"9e5c2314c2a0781962db85611417c614bd6756666b6b1e93840f5b6ed895f003"}}"#;
let dir: PathBuf = temp_path.path().into();
let mut vault_file_path: PathBuf = dir.clone();
vault_file_path.push(VAULT_FILE_NAME);
{
let mut vault_file = fs::File::create(vault_file_path).unwrap();
vault_file
.write_all(vault_file_contents.as_bytes())
.unwrap();
}
#[test]
fn read_vault_file_succeeds() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let vault_file_contents = r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"758696c8dc6378ab9b25bb42790da2f5"},"ciphertext":"54eb50683717d41caaeb12ea969f2c159daada5907383f26f327606a37dc7168","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"3c320fa566a1a7963ac8df68a19548d27c8f40bf92ef87c84594dcd5bbc402b6"},"mac":"9e5c2314c2a0781962db85611417c614bd6756666b6b1e93840f5b6ed895f003"}}"#;
let dir: PathBuf = temp_path.path().into();
let mut vault_file_path: PathBuf = dir.clone();
vault_file_path.push(VAULT_FILE_NAME);
{
let mut vault_file = fs::File::create(vault_file_path).unwrap();
vault_file.write_all(vault_file_contents.as_bytes()).unwrap();
}
// when
let result = read_vault_file(&dir, Some(&key));
// when
let result = read_vault_file(&dir, Some(&key));
// then
assert!(result.is_ok());
}
// then
assert!(result.is_ok());
}
#[test]
fn read_vault_file_fails() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password1".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
let mut vault_file_path: PathBuf = dir.clone();
vault_file_path.push(VAULT_FILE_NAME);
#[test]
fn read_vault_file_fails() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password1".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
let mut vault_file_path: PathBuf = dir.clone();
vault_file_path.push(VAULT_FILE_NAME);
// when
let result = read_vault_file(&dir, Some(&key));
// when
let result = read_vault_file(&dir, Some(&key));
// then
assert!(result.is_err());
// then
assert!(result.is_err());
// and when given
let vault_file_contents = r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"0155e3690be19fbfbecabcd440aa284b"},"ciphertext":"4d6938a1f49b7782","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5"},"mac":"16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262"}}"#;
{
let mut vault_file = fs::File::create(vault_file_path).unwrap();
vault_file
.write_all(vault_file_contents.as_bytes())
.unwrap();
}
// and when given
let vault_file_contents = r#"{"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"0155e3690be19fbfbecabcd440aa284b"},"ciphertext":"4d6938a1f49b7782","kdf":"pbkdf2","kdfparams":{"c":1024,"dklen":32,"prf":"hmac-sha256","salt":"b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5"},"mac":"16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262"}}"#;
{
let mut vault_file = fs::File::create(vault_file_path).unwrap();
vault_file.write_all(vault_file_contents.as_bytes()).unwrap();
}
// when
let result = read_vault_file(&dir, Some(&key));
// when
let result = read_vault_file(&dir, Some(&key));
// then
assert!(result.is_err());
}
// then
assert!(result.is_err());
}
#[test]
fn vault_directory_can_be_created() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
#[test]
fn vault_directory_can_be_created() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
// when
let vault = VaultDiskDirectory::create(&dir, "vault", key.clone());
// when
let vault = VaultDiskDirectory::create(&dir, "vault", key.clone());
// then
assert!(vault.is_ok());
// then
assert!(vault.is_ok());
// and when
let vault = VaultDiskDirectory::at(&dir, "vault", key);
// and when
let vault = VaultDiskDirectory::at(&dir, "vault", key);
// then
assert!(vault.is_ok());
}
// then
assert!(vault.is_ok());
}
#[test]
fn vault_directory_cannot_be_created_if_already_exists() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
let mut vault_dir = dir.clone();
vault_dir.push("vault");
fs::create_dir_all(&vault_dir).unwrap();
#[test]
fn vault_directory_cannot_be_created_if_already_exists() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
let mut vault_dir = dir.clone();
vault_dir.push("vault");
fs::create_dir_all(&vault_dir).unwrap();
// when
let vault = VaultDiskDirectory::create(&dir, "vault", key);
// when
let vault = VaultDiskDirectory::create(&dir, "vault", key);
// then
assert!(vault.is_err());
}
// then
assert!(vault.is_err());
}
#[test]
fn vault_directory_cannot_be_opened_if_not_exists() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
#[test]
fn vault_directory_cannot_be_opened_if_not_exists() {
// given
let temp_path = TempDir::new("").unwrap();
let key = VaultKey::new(&"password".into(), *ITERATIONS);
let dir: PathBuf = temp_path.path().into();
// when
let vault = VaultDiskDirectory::at(&dir, "vault", key);
// when
let vault = VaultDiskDirectory::at(&dir, "vault", key);
// then
assert!(vault.is_err());
}
// then
assert!(vault.is_err());
}
}
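For orientation, a minimal usage sketch of the vault API exercised by these tests. It assumes the crate-internal VaultKey and VaultDiskDirectory types shown above; the path is hypothetical.
use std::{num::NonZeroU32, path::PathBuf};

// Sketch only: create a vault on disk, then re-open it with the same key.
let iterations = NonZeroU32::new(1024).expect("1024 > 0; qed");
let key = VaultKey::new(&"password".into(), iterations);
let dir = PathBuf::from("/tmp/parity-vaults"); // hypothetical location
let created = VaultDiskDirectory::create(&dir, "vault", key.clone());
assert!(created.is_ok()); // writes VAULT_FILE_NAME inside <dir>/vault
let reopened = VaultDiskDirectory::at(&dir, "vault", key);
assert!(reopened.is_ok()); // a wrong password is rejected when the vault file is read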
View File
@@ -14,115 +14,113 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use std::io::Error as IoError;
use ethkey::{self, Error as EthKeyError};
use crypto::{self, Error as EthCryptoError};
use ethkey::DerivationError;
use ethkey::{self, DerivationError, Error as EthKeyError};
use std::{fmt, io::Error as IoError};
/// Account-related errors.
#[derive(Debug)]
pub enum Error {
/// IO error
Io(IoError),
/// Invalid Password
InvalidPassword,
/// Account's secret is invalid.
InvalidSecret,
/// Invalid Vault Crypto meta.
InvalidCryptoMeta,
/// Invalid Account.
InvalidAccount,
/// Invalid Message.
InvalidMessage,
/// Invalid Key File
InvalidKeyFile(String),
/// Vaults are not supported.
VaultsAreNotSupported,
/// Unsupported vault
UnsupportedVault,
/// Invalid vault name
InvalidVaultName,
/// Vault not found
VaultNotFound,
/// Account creation failed.
CreationFailed,
/// `EthKey` error
EthKey(EthKeyError),
/// `ethkey::crypto::Error`
EthKeyCrypto(ethkey::crypto::Error),
/// `EthCrypto` error
EthCrypto(EthCryptoError),
/// Derivation error
Derivation(DerivationError),
/// Custom error
Custom(String),
/// IO error
Io(IoError),
/// Invalid Password
InvalidPassword,
/// Account's secret is invalid.
InvalidSecret,
/// Invalid Vault Crypto meta.
InvalidCryptoMeta,
/// Invalid Account.
InvalidAccount,
/// Invalid Message.
InvalidMessage,
/// Invalid Key File
InvalidKeyFile(String),
/// Vaults are not supported.
VaultsAreNotSupported,
/// Unsupported vault
UnsupportedVault,
/// Invalid vault name
InvalidVaultName,
/// Vault not found
VaultNotFound,
/// Account creation failed.
CreationFailed,
/// `EthKey` error
EthKey(EthKeyError),
/// `ethkey::crypto::Error`
EthKeyCrypto(ethkey::crypto::Error),
/// `EthCrypto` error
EthCrypto(EthCryptoError),
/// Derivation error
Derivation(DerivationError),
/// Custom error
Custom(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let s = match *self {
Error::Io(ref err) => err.to_string(),
Error::InvalidPassword => "Invalid password".into(),
Error::InvalidSecret => "Invalid secret".into(),
Error::InvalidCryptoMeta => "Invalid crypted metadata".into(),
Error::InvalidAccount => "Invalid account".into(),
Error::InvalidMessage => "Invalid message".into(),
Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason),
Error::VaultsAreNotSupported => "Vaults are not supported".into(),
Error::UnsupportedVault => "Vault is not supported for this operation".into(),
Error::InvalidVaultName => "Invalid vault name".into(),
Error::VaultNotFound => "Vault not found".into(),
Error::CreationFailed => "Account creation failed".into(),
Error::EthKey(ref err) => err.to_string(),
Error::EthKeyCrypto(ref err) => err.to_string(),
Error::EthCrypto(ref err) => err.to_string(),
Error::Derivation(ref err) => format!("Derivation error: {:?}", err),
Error::Custom(ref s) => s.clone(),
};
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let s = match *self {
Error::Io(ref err) => err.to_string(),
Error::InvalidPassword => "Invalid password".into(),
Error::InvalidSecret => "Invalid secret".into(),
Error::InvalidCryptoMeta => "Invalid crypted metadata".into(),
Error::InvalidAccount => "Invalid account".into(),
Error::InvalidMessage => "Invalid message".into(),
Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason),
Error::VaultsAreNotSupported => "Vaults are not supported".into(),
Error::UnsupportedVault => "Vault is not supported for this operation".into(),
Error::InvalidVaultName => "Invalid vault name".into(),
Error::VaultNotFound => "Vault not found".into(),
Error::CreationFailed => "Account creation failed".into(),
Error::EthKey(ref err) => err.to_string(),
Error::EthKeyCrypto(ref err) => err.to_string(),
Error::EthCrypto(ref err) => err.to_string(),
Error::Derivation(ref err) => format!("Derivation error: {:?}", err),
Error::Custom(ref s) => s.clone(),
};
write!(f, "{}", s)
}
write!(f, "{}", s)
}
}
impl From<IoError> for Error {
fn from(err: IoError) -> Self {
Error::Io(err)
}
fn from(err: IoError) -> Self {
Error::Io(err)
}
}
impl From<EthKeyError> for Error {
fn from(err: EthKeyError) -> Self {
Error::EthKey(err)
}
fn from(err: EthKeyError) -> Self {
Error::EthKey(err)
}
}
impl From<ethkey::crypto::Error> for Error {
fn from(err: ethkey::crypto::Error) -> Self {
Error::EthKeyCrypto(err)
}
fn from(err: ethkey::crypto::Error) -> Self {
Error::EthKeyCrypto(err)
}
}
impl From<EthCryptoError> for Error {
fn from(err: EthCryptoError) -> Self {
Error::EthCrypto(err)
}
fn from(err: EthCryptoError) -> Self {
Error::EthCrypto(err)
}
}
impl From<crypto::error::ScryptError> for Error {
fn from(err: crypto::error::ScryptError) -> Self {
Error::EthCrypto(err.into())
}
fn from(err: crypto::error::ScryptError) -> Self {
Error::EthCrypto(err.into())
}
}
impl From<crypto::error::SymmError> for Error {
fn from(err: crypto::error::SymmError) -> Self {
Error::EthCrypto(err.into())
}
fn from(err: crypto::error::SymmError) -> Self {
Error::EthCrypto(err.into())
}
}
impl From<DerivationError> for Error {
fn from(err: DerivationError) -> Self {
Error::Derivation(err)
}
fn from(err: DerivationError) -> Self {
Error::Derivation(err)
}
}
View File
@@ -19,23 +19,23 @@ pub use _ethkey::*;
use json;
impl Into<json::H160> for Address {
fn into(self) -> json::H160 {
let a: [u8; 20] = self.into();
From::from(a)
}
fn into(self) -> json::H160 {
let a: [u8; 20] = self.into();
From::from(a)
}
}
impl From<json::H160> for Address {
fn from(json: json::H160) -> Self {
let a: [u8; 20] = json.into();
From::from(a)
}
fn from(json: json::H160) -> Self {
let a: [u8; 20] = json.into();
From::from(a)
}
}
impl<'a> From<&'a json::H160> for Address {
fn from(json: &'a json::H160) -> Self {
let mut a = [0u8; 20];
a.copy_from_slice(json);
From::from(a)
}
fn from(json: &'a json::H160) -> Self {
let mut a = [0u8; 20];
a.copy_from_slice(json);
From::from(a)
}
}
File diff suppressed because it is too large
View File
@@ -14,67 +14,86 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashSet;
use std::path::Path;
use std::fs;
use std::{collections::HashSet, fs, path::Path};
use ethkey::Address;
use accounts_dir::{KeyDirectory, RootDiskDirectory, DiskKeyFileManager, KeyFileManager};
use accounts_dir::{DiskKeyFileManager, KeyDirectory, KeyFileManager, RootDiskDirectory};
use dir;
use ethkey::Address;
use Error;
/// Import an account from a file.
pub fn import_account(path: &Path, dst: &KeyDirectory) -> Result<Address, Error> {
let key_manager = DiskKeyFileManager::default();
let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::<HashSet<_>>();
let filename = path.file_name().and_then(|n| n.to_str()).map(|f| f.to_owned());
let account = fs::File::open(&path)
.map_err(Into::into)
.and_then(|file| key_manager.read(filename, file))?;
let key_manager = DiskKeyFileManager::default();
let existing_accounts = dst
.load()?
.into_iter()
.map(|a| a.address)
.collect::<HashSet<_>>();
let filename = path
.file_name()
.and_then(|n| n.to_str())
.map(|f| f.to_owned());
let account = fs::File::open(&path)
.map_err(Into::into)
.and_then(|file| key_manager.read(filename, file))?;
let address = account.address.clone();
if !existing_accounts.contains(&address) {
dst.insert(account)?;
}
Ok(address)
let address = account.address.clone();
if !existing_accounts.contains(&address) {
dst.insert(account)?;
}
Ok(address)
}
/// Import all accounts from one directory to the other.
pub fn import_accounts(src: &KeyDirectory, dst: &KeyDirectory) -> Result<Vec<Address>, Error> {
let accounts = src.load()?;
let existing_accounts = dst.load()?.into_iter()
.map(|a| a.address)
.collect::<HashSet<_>>();
let accounts = src.load()?;
let existing_accounts = dst
.load()?
.into_iter()
.map(|a| a.address)
.collect::<HashSet<_>>();
accounts.into_iter()
.filter(|a| !existing_accounts.contains(&a.address))
.map(|a| {
let address = a.address.clone();
dst.insert(a)?;
Ok(address)
}).collect()
accounts
.into_iter()
.filter(|a| !existing_accounts.contains(&a.address))
.map(|a| {
let address = a.address.clone();
dst.insert(a)?;
Ok(address)
})
.collect()
}
/// Provide the list of all accounts available for import from the Geth keystore.
pub fn read_geth_accounts(testnet: bool) -> Vec<Address> {
RootDiskDirectory::at(dir::geth(testnet))
.load()
.map(|d| d.into_iter().map(|a| a.address).collect())
.unwrap_or_else(|_| Vec::new())
RootDiskDirectory::at(dir::geth(testnet))
.load()
.map(|d| d.into_iter().map(|a| a.address).collect())
.unwrap_or_else(|_| Vec::new())
}
/// Import specific `desired` accounts from the Geth keystore into `dst`.
pub fn import_geth_accounts(dst: &KeyDirectory, desired: HashSet<Address>, testnet: bool) -> Result<Vec<Address>, Error> {
let src = RootDiskDirectory::at(dir::geth(testnet));
let accounts = src.load()?;
let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::<HashSet<_>>();
pub fn import_geth_accounts(
dst: &KeyDirectory,
desired: HashSet<Address>,
testnet: bool,
) -> Result<Vec<Address>, Error> {
let src = RootDiskDirectory::at(dir::geth(testnet));
let accounts = src.load()?;
let existing_accounts = dst
.load()?
.into_iter()
.map(|a| a.address)
.collect::<HashSet<_>>();
accounts.into_iter()
.filter(|a| !existing_accounts.contains(&a.address))
.filter(|a| desired.contains(&a.address))
.map(|a| {
let address = a.address.clone();
dst.insert(a)?;
Ok(address)
}).collect()
accounts
.into_iter()
.filter(|a| !existing_accounts.contains(&a.address))
.filter(|a| desired.contains(&a.address))
.map(|a| {
let address = a.address.clone();
dst.insert(a)?;
Ok(address)
})
.collect()
}

View File
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use rustc_hex::{FromHex, FromHexError, ToHex};
use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer};
use std::{ops, str};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde::de::Error;
use rustc_hex::{ToHex, FromHex, FromHexError};
#[derive(Debug, PartialEq)]
pub struct Bytes(Vec<u8>);
impl ops::Deref for Bytes {
type Target = [u8];
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<'a> Deserialize<'a> for Bytes {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a>
{
let s = String::deserialize(deserializer)?;
let data = s.from_hex().map_err(|e| Error::custom(format!("Invalid hex value {}", e)))?;
Ok(Bytes(data))
}
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
let s = String::deserialize(deserializer)?;
let data = s
.from_hex()
.map_err(|e| Error::custom(format!("Invalid hex value {}", e)))?;
Ok(Bytes(data))
}
}
impl Serialize for Bytes {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
serializer.serialize_str(&self.0.to_hex())
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.0.to_hex())
}
}
impl str::FromStr for Bytes {
type Err = FromHexError;
type Err = FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.from_hex().map(Bytes)
}
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.from_hex().map(Bytes)
}
}
impl From<&'static str> for Bytes {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s))
}
fn from(s: &'static str) -> Self {
s.parse().expect(&format!(
"invalid string literal for {}: '{}'",
stringify!(Self),
s
))
}
}
impl From<Vec<u8>> for Bytes {
fn from(v: Vec<u8>) -> Self {
Bytes(v)
}
fn from(v: Vec<u8>) -> Self {
Bytes(v)
}
}
impl From<Bytes> for Vec<u8> {
fn from(b: Bytes) -> Self {
b.0
}
fn from(b: Bytes) -> Self {
b.0
}
}
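A small sketch of the round trip this wrapper provides, assuming this json module's Bytes type is in scope.
use std::str::FromStr;

// Sketch only: hex string -> Bytes -> raw bytes.
let bytes = Bytes::from_str("0102ff").expect("valid hex"); // malformed input yields FromHexError
assert_eq!(&*bytes, &[0x01, 0x02, 0xff][..]); // Deref<Target = [u8]>
let raw: Vec<u8> = bytes.into(); // From<Bytes> for Vec<u8>
assert_eq!(raw, vec![1, 2, 255]);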
View File
@@ -14,83 +14,99 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::de::{Visitor, Error as SerdeError};
use super::{Error, H128};
use serde::{
de::{Error as SerdeError, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::fmt;
#[derive(Debug, PartialEq)]
pub enum CipherSer {
Aes128Ctr,
Aes128Ctr,
}
impl Serialize for CipherSer {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
match *self {
CipherSer::Aes128Ctr => serializer.serialize_str("aes-128-ctr"),
}
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
CipherSer::Aes128Ctr => serializer.serialize_str("aes-128-ctr"),
}
}
}
impl<'a> Deserialize<'a> for CipherSer {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
deserializer.deserialize_any(CipherSerVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
deserializer.deserialize_any(CipherSerVisitor)
}
}
struct CipherSerVisitor;
impl<'a> Visitor<'a> for CipherSerVisitor {
type Value = CipherSer;
type Value = CipherSer;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid cipher identifier")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid cipher identifier")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
match value {
"aes-128-ctr" => Ok(CipherSer::Aes128Ctr),
_ => Err(SerdeError::custom(Error::UnsupportedCipher))
}
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
match value {
"aes-128-ctr" => Ok(CipherSer::Aes128Ctr),
_ => Err(SerdeError::custom(Error::UnsupportedCipher)),
}
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: SerdeError,
{
self.visit_str(value.as_ref())
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Aes128Ctr {
pub iv: H128,
pub iv: H128,
}
#[derive(Debug, PartialEq)]
pub enum CipherSerParams {
Aes128Ctr(Aes128Ctr),
Aes128Ctr(Aes128Ctr),
}
impl Serialize for CipherSerParams {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
match *self {
CipherSerParams::Aes128Ctr(ref params) => params.serialize(serializer),
}
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
CipherSerParams::Aes128Ctr(ref params) => params.serialize(serializer),
}
}
}
impl<'a> Deserialize<'a> for CipherSerParams {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
Aes128Ctr::deserialize(deserializer)
.map(CipherSerParams::Aes128Ctr)
.map_err(|_| Error::InvalidCipherParams)
.map_err(SerdeError::custom)
}
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
Aes128Ctr::deserialize(deserializer)
.map(CipherSerParams::Aes128Ctr)
.map_err(|_| Error::InvalidCipherParams)
.map_err(SerdeError::custom)
}
}
#[derive(Debug, PartialEq)]
pub enum Cipher {
Aes128Ctr(Aes128Ctr),
Aes128Ctr(Aes128Ctr),
}
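A brief sketch of how the cipherparams object of a keystore file maps onto these types, assuming serde_json is available as elsewhere in this crate; the iv value is taken from the vault test fixture earlier in this diff.
// Sketch only: deserialize the params, then pair them with the cipher identifier.
let params: Aes128Ctr =
    serde_json::from_str(r#"{"iv":"758696c8dc6378ab9b25bb42790da2f5"}"#).expect("valid params");
let cipher = Cipher::Aes128Ctr(params);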
View File
@@ -14,181 +14,205 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::{fmt, str};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde::ser::SerializeStruct;
use serde::de::{Visitor, MapAccess, Error};
use super::{Bytes, Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256};
use serde::{
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use serde_json;
use super::{Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256, Bytes};
use std::{fmt, str};
pub type CipherText = Bytes;
#[derive(Debug, PartialEq)]
pub struct Crypto {
pub cipher: Cipher,
pub ciphertext: CipherText,
pub kdf: Kdf,
pub mac: H256,
pub cipher: Cipher,
pub ciphertext: CipherText,
pub kdf: Kdf,
pub mac: H256,
}
impl str::FromStr for Crypto {
type Err = serde_json::error::Error;
type Err = serde_json::error::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
serde_json::from_str(s)
}
fn from_str(s: &str) -> Result<Self, Self::Err> {
serde_json::from_str(s)
}
}
impl From<Crypto> for String {
fn from(c: Crypto) -> Self {
serde_json::to_string(&c).expect("serialization cannot fail, because all crypto keys are strings")
}
fn from(c: Crypto) -> Self {
serde_json::to_string(&c)
.expect("serialization cannot fail, cause all crypto keys are strings")
}
}
enum CryptoField {
Cipher,
CipherParams,
CipherText,
Kdf,
KdfParams,
Mac,
Version,
Cipher,
CipherParams,
CipherText,
Kdf,
KdfParams,
Mac,
Version,
}
impl<'a> Deserialize<'a> for CryptoField {
fn deserialize<D>(deserializer: D) -> Result<CryptoField, D::Error>
where D: Deserializer<'a>
{
deserializer.deserialize_any(CryptoFieldVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<CryptoField, D::Error>
where
D: Deserializer<'a>,
{
deserializer.deserialize_any(CryptoFieldVisitor)
}
}
struct CryptoFieldVisitor;
impl<'a> Visitor<'a> for CryptoFieldVisitor {
type Value = CryptoField;
type Value = CryptoField;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid crypto struct description")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid crypto struct description")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where E: Error
{
match value {
"cipher" => Ok(CryptoField::Cipher),
"cipherparams" => Ok(CryptoField::CipherParams),
"ciphertext" => Ok(CryptoField::CipherText),
"kdf" => Ok(CryptoField::Kdf),
"kdfparams" => Ok(CryptoField::KdfParams),
"mac" => Ok(CryptoField::Mac),
"version" => Ok(CryptoField::Version),
_ => Err(Error::custom(format!("Unknown field: '{}'", value))),
}
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: Error,
{
match value {
"cipher" => Ok(CryptoField::Cipher),
"cipherparams" => Ok(CryptoField::CipherParams),
"ciphertext" => Ok(CryptoField::CipherText),
"kdf" => Ok(CryptoField::Kdf),
"kdfparams" => Ok(CryptoField::KdfParams),
"mac" => Ok(CryptoField::Mac),
"version" => Ok(CryptoField::Version),
_ => Err(Error::custom(format!("Unknown field: '{}'", value))),
}
}
}
impl<'a> Deserialize<'a> for Crypto {
fn deserialize<D>(deserializer: D) -> Result<Crypto, D::Error>
where D: Deserializer<'a>
{
static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"];
deserializer.deserialize_struct("Crypto", FIELDS, CryptoVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<Crypto, D::Error>
where
D: Deserializer<'a>,
{
static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"];
deserializer.deserialize_struct("Crypto", FIELDS, CryptoVisitor)
}
}
struct CryptoVisitor;
impl<'a> Visitor<'a> for CryptoVisitor {
type Value = Crypto;
type Value = Crypto;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid vault crypto object")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid vault crypto object")
}
fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
where V: MapAccess<'a>
{
let mut cipher = None;
let mut cipherparams = None;
let mut ciphertext = None;
let mut kdf = None;
let mut kdfparams = None;
let mut mac = None;
fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
where
V: MapAccess<'a>,
{
let mut cipher = None;
let mut cipherparams = None;
let mut ciphertext = None;
let mut kdf = None;
let mut kdfparams = None;
let mut mac = None;
loop {
match visitor.next_key()? {
Some(CryptoField::Cipher) => { cipher = Some(visitor.next_value()?); }
Some(CryptoField::CipherParams) => { cipherparams = Some(visitor.next_value()?); }
Some(CryptoField::CipherText) => { ciphertext = Some(visitor.next_value()?); }
Some(CryptoField::Kdf) => { kdf = Some(visitor.next_value()?); }
Some(CryptoField::KdfParams) => { kdfparams = Some(visitor.next_value()?); }
Some(CryptoField::Mac) => { mac = Some(visitor.next_value()?); }
// skip the optional version field (it appears in pyethereum-generated keystores)
Some(CryptoField::Version) => { visitor.next_value().unwrap_or(()) }
None => { break; }
}
}
loop {
match visitor.next_key()? {
Some(CryptoField::Cipher) => {
cipher = Some(visitor.next_value()?);
}
Some(CryptoField::CipherParams) => {
cipherparams = Some(visitor.next_value()?);
}
Some(CryptoField::CipherText) => {
ciphertext = Some(visitor.next_value()?);
}
Some(CryptoField::Kdf) => {
kdf = Some(visitor.next_value()?);
}
Some(CryptoField::KdfParams) => {
kdfparams = Some(visitor.next_value()?);
}
Some(CryptoField::Mac) => {
mac = Some(visitor.next_value()?);
}
// skip the optional version field (it appears in pyethereum-generated keystores)
Some(CryptoField::Version) => visitor.next_value().unwrap_or(()),
None => {
break;
}
}
}
let cipher = match (cipher, cipherparams) {
(Some(CipherSer::Aes128Ctr), Some(CipherSerParams::Aes128Ctr(params))) => Cipher::Aes128Ctr(params),
(None, _) => return Err(V::Error::missing_field("cipher")),
(Some(_), None) => return Err(V::Error::missing_field("cipherparams")),
};
let cipher = match (cipher, cipherparams) {
(Some(CipherSer::Aes128Ctr), Some(CipherSerParams::Aes128Ctr(params))) => {
Cipher::Aes128Ctr(params)
}
(None, _) => return Err(V::Error::missing_field("cipher")),
(Some(_), None) => return Err(V::Error::missing_field("cipherparams")),
};
let ciphertext = match ciphertext {
Some(ciphertext) => ciphertext,
None => return Err(V::Error::missing_field("ciphertext")),
};
let ciphertext = match ciphertext {
Some(ciphertext) => ciphertext,
None => return Err(V::Error::missing_field("ciphertext")),
};
let kdf = match (kdf, kdfparams) {
(Some(KdfSer::Pbkdf2), Some(KdfSerParams::Pbkdf2(params))) => Kdf::Pbkdf2(params),
(Some(KdfSer::Scrypt), Some(KdfSerParams::Scrypt(params))) => Kdf::Scrypt(params),
(Some(_), Some(_)) => return Err(V::Error::custom("Invalid cipherparams")),
(None, _) => return Err(V::Error::missing_field("kdf")),
(Some(_), None) => return Err(V::Error::missing_field("kdfparams")),
};
let kdf = match (kdf, kdfparams) {
(Some(KdfSer::Pbkdf2), Some(KdfSerParams::Pbkdf2(params))) => Kdf::Pbkdf2(params),
(Some(KdfSer::Scrypt), Some(KdfSerParams::Scrypt(params))) => Kdf::Scrypt(params),
(Some(_), Some(_)) => return Err(V::Error::custom("Invalid cipherparams")),
(None, _) => return Err(V::Error::missing_field("kdf")),
(Some(_), None) => return Err(V::Error::missing_field("kdfparams")),
};
let mac = match mac {
Some(mac) => mac,
None => return Err(V::Error::missing_field("mac")),
};
let mac = match mac {
Some(mac) => mac,
None => return Err(V::Error::missing_field("mac")),
};
let result = Crypto {
cipher: cipher,
ciphertext: ciphertext,
kdf: kdf,
mac: mac,
};
let result = Crypto {
cipher: cipher,
ciphertext: ciphertext,
kdf: kdf,
mac: mac,
};
Ok(result)
}
Ok(result)
}
}
impl Serialize for Crypto {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
{
let mut crypto = serializer.serialize_struct("Crypto", 6)?;
match self.cipher {
Cipher::Aes128Ctr(ref params) => {
crypto.serialize_field("cipher", &CipherSer::Aes128Ctr)?;
crypto.serialize_field("cipherparams", params)?;
},
}
crypto.serialize_field("ciphertext", &self.ciphertext)?;
match self.kdf {
Kdf::Pbkdf2(ref params) => {
crypto.serialize_field("kdf", &KdfSer::Pbkdf2)?;
crypto.serialize_field("kdfparams", params)?;
},
Kdf::Scrypt(ref params) => {
crypto.serialize_field("kdf", &KdfSer::Scrypt)?;
crypto.serialize_field("kdfparams", params)?;
},
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut crypto = serializer.serialize_struct("Crypto", 6)?;
match self.cipher {
Cipher::Aes128Ctr(ref params) => {
crypto.serialize_field("cipher", &CipherSer::Aes128Ctr)?;
crypto.serialize_field("cipherparams", params)?;
}
}
crypto.serialize_field("ciphertext", &self.ciphertext)?;
match self.kdf {
Kdf::Pbkdf2(ref params) => {
crypto.serialize_field("kdf", &KdfSer::Pbkdf2)?;
crypto.serialize_field("kdfparams", params)?;
}
Kdf::Scrypt(ref params) => {
crypto.serialize_field("kdf", &KdfSer::Scrypt)?;
crypto.serialize_field("kdfparams", params)?;
}
}
crypto.serialize_field("mac", &self.mac)?;
crypto.end()
}
crypto.serialize_field("mac", &self.mac)?;
crypto.end()
}
}
View File
@@ -18,33 +18,33 @@ use std::fmt;
#[derive(Debug, PartialEq)]
pub enum Error {
UnsupportedCipher,
InvalidCipherParams,
UnsupportedKdf,
InvalidUuid,
UnsupportedVersion,
InvalidCiphertext,
InvalidH256,
InvalidPrf,
UnsupportedCipher,
InvalidCipherParams,
UnsupportedKdf,
InvalidUuid,
UnsupportedVersion,
InvalidCiphertext,
InvalidH256,
InvalidPrf,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::InvalidUuid => write!(f, "Invalid Uuid"),
Error::UnsupportedVersion => write!(f, "Unsupported version"),
Error::UnsupportedKdf => write!(f, "Unsupported kdf"),
Error::InvalidCiphertext => write!(f, "Invalid ciphertext"),
Error::UnsupportedCipher => write!(f, "Unsupported cipher"),
Error::InvalidCipherParams => write!(f, "Invalid cipher params"),
Error::InvalidH256 => write!(f, "Invalid hash"),
Error::InvalidPrf => write!(f, "Invalid prf"),
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::InvalidUuid => write!(f, "Invalid Uuid"),
Error::UnsupportedVersion => write!(f, "Unsupported version"),
Error::UnsupportedKdf => write!(f, "Unsupported kdf"),
Error::InvalidCiphertext => write!(f, "Invalid ciphertext"),
Error::UnsupportedCipher => write!(f, "Unsupported cipher"),
Error::InvalidCipherParams => write!(f, "Invalid cipher params"),
Error::InvalidH256 => write!(f, "Invalid hash"),
Error::InvalidPrf => write!(f, "Invalid prf"),
}
}
}
impl Into<String> for Error {
fn into(self) -> String {
format!("{}", self)
}
fn into(self) -> String {
format!("{}", self)
}
}
View File
@@ -14,104 +14,120 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::{ops, fmt, str};
use rustc_hex::{FromHex, ToHex};
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::de::{Visitor, Error as SerdeError};
use super::Error;
use rustc_hex::{FromHex, ToHex};
use serde::{
de::{Error as SerdeError, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::{fmt, ops, str};
macro_rules! impl_hash {
($name: ident, $size: expr) => {
pub struct $name([u8; $size]);
($name: ident, $size: expr) => {
pub struct $name([u8; $size]);
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let self_ref: &[u8] = &self.0;
write!(f, "{:?}", self_ref)
}
}
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let self_ref: &[u8] = &self.0;
write!(f, "{:?}", self_ref)
}
}
impl PartialEq for $name {
fn eq(&self, other: &Self) -> bool {
let self_ref: &[u8] = &self.0;
let other_ref: &[u8] = &other.0;
self_ref == other_ref
}
}
impl PartialEq for $name {
fn eq(&self, other: &Self) -> bool {
let self_ref: &[u8] = &self.0;
let other_ref: &[u8] = &other.0;
self_ref == other_ref
}
}
impl ops::Deref for $name {
type Target = [u8];
impl ops::Deref for $name {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Serialize for $name {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
serializer.serialize_str(&self.0.to_hex())
}
}
impl Serialize for $name {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.0.to_hex())
}
}
impl<'a> Deserialize<'a> for $name {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
struct HashVisitor;
impl<'a> Deserialize<'a> for $name {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
struct HashVisitor;
impl<'b> Visitor<'b> for HashVisitor {
type Value = $name;
impl<'b> Visitor<'b> for HashVisitor {
type Value = $name;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a hex-encoded {}", stringify!($name))
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a hex-encoded {}", stringify!($name))
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
value.parse().map_err(SerdeError::custom)
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
value.parse().map_err(SerdeError::custom)
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: SerdeError,
{
self.visit_str(value.as_ref())
}
}
deserializer.deserialize_any(HashVisitor)
}
}
deserializer.deserialize_any(HashVisitor)
}
}
impl str::FromStr for $name {
type Err = Error;
impl str::FromStr for $name {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.from_hex() {
Ok(ref hex) if hex.len() == $size => {
let mut hash = [0u8; $size];
hash.clone_from_slice(hex);
Ok($name(hash))
}
_ => Err(Error::InvalidH256),
}
}
}
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.from_hex() {
Ok(ref hex) if hex.len() == $size => {
let mut hash = [0u8; $size];
hash.clone_from_slice(hex);
Ok($name(hash))
}
_ => Err(Error::InvalidH256),
}
}
}
impl From<&'static str> for $name {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!($name), s))
}
}
impl From<&'static str> for $name {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!(
"invalid string literal for {}: '{}'",
stringify!($name),
s
))
}
}
impl From<[u8; $size]> for $name {
fn from(bytes: [u8; $size]) -> Self {
$name(bytes)
}
}
impl From<[u8; $size]> for $name {
fn from(bytes: [u8; $size]) -> Self {
$name(bytes)
}
}
impl Into<[u8; $size]> for $name {
fn into(self) -> [u8; $size] {
self.0
}
}
}
impl Into<[u8; $size]> for $name {
fn into(self) -> [u8; $size] {
self.0
}
}
};
}
impl_hash!(H128, 16);
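A short sketch of what impl_hash! generates for H128; the hex literal is the iv used in the fixtures earlier in this diff, and Error refers to this json module's error type.
// Sketch only: parse from hex, convert back to raw bytes, and reject malformed input.
let iv: H128 = "758696c8dc6378ab9b25bb42790da2f5".into(); // From<&'static str>; panics on a bad literal
let raw: [u8; 16] = iv.into();
assert_eq!(raw[0], 0x75);
let bad: Result<H128, Error> = "not hex".parse(); // wrong length / non-hex input
assert!(bad.is_err());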
View File
@@ -15,139 +15,165 @@
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Universally unique identifier.
use std::{fmt, str};
use rustc_hex::{ToHex, FromHex};
use serde::{Deserialize, Serialize, Deserializer, Serializer};
use serde::de::{Visitor, Error as SerdeError};
use super::Error;
use rustc_hex::{FromHex, ToHex};
use serde::{
de::{Error as SerdeError, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::{fmt, str};
/// Universally unique identifier.
#[derive(Debug, PartialEq)]
pub struct Uuid([u8; 16]);
impl From<[u8; 16]> for Uuid {
fn from(uuid: [u8; 16]) -> Self {
Uuid(uuid)
}
fn from(uuid: [u8; 16]) -> Self {
Uuid(uuid)
}
}
impl<'a> Into<String> for &'a Uuid {
fn into(self) -> String {
let d1 = &self.0[0..4];
let d2 = &self.0[4..6];
let d3 = &self.0[6..8];
let d4 = &self.0[8..10];
let d5 = &self.0[10..16];
[d1, d2, d3, d4, d5].into_iter().map(|d| d.to_hex()).collect::<Vec<String>>().join("-")
}
fn into(self) -> String {
let d1 = &self.0[0..4];
let d2 = &self.0[4..6];
let d3 = &self.0[6..8];
let d4 = &self.0[8..10];
let d5 = &self.0[10..16];
[d1, d2, d3, d4, d5]
.into_iter()
.map(|d| d.to_hex())
.collect::<Vec<String>>()
.join("-")
}
}
impl Into<String> for Uuid {
fn into(self) -> String {
Into::into(&self)
}
fn into(self) -> String {
Into::into(&self)
}
}
impl Into<[u8; 16]> for Uuid {
fn into(self) -> [u8; 16] {
self.0
}
fn into(self) -> [u8; 16] {
self.0
}
}
impl fmt::Display for Uuid {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let s: String = (self as &Uuid).into();
write!(f, "{}", s)
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let s: String = (self as &Uuid).into();
write!(f, "{}", s)
}
}
fn copy_into(from: &str, into: &mut [u8]) -> Result<(), Error> {
let from = from.from_hex().map_err(|_| Error::InvalidUuid)?;
let from = from.from_hex().map_err(|_| Error::InvalidUuid)?;
if from.len() != into.len() {
return Err(Error::InvalidUuid);
}
if from.len() != into.len() {
return Err(Error::InvalidUuid);
}
into.copy_from_slice(&from);
Ok(())
into.copy_from_slice(&from);
Ok(())
}
impl str::FromStr for Uuid {
type Err = Error;
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split("-").collect();
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split("-").collect();
if parts.len() != 5 {
return Err(Error::InvalidUuid);
}
if parts.len() != 5 {
return Err(Error::InvalidUuid);
}
let mut uuid = [0u8; 16];
let mut uuid = [0u8; 16];
copy_into(parts[0], &mut uuid[0..4])?;
copy_into(parts[1], &mut uuid[4..6])?;
copy_into(parts[2], &mut uuid[6..8])?;
copy_into(parts[3], &mut uuid[8..10])?;
copy_into(parts[4], &mut uuid[10..16])?;
copy_into(parts[0], &mut uuid[0..4])?;
copy_into(parts[1], &mut uuid[4..6])?;
copy_into(parts[2], &mut uuid[6..8])?;
copy_into(parts[3], &mut uuid[8..10])?;
copy_into(parts[4], &mut uuid[10..16])?;
Ok(Uuid(uuid))
}
Ok(Uuid(uuid))
}
}
impl From<&'static str> for Uuid {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s))
}
fn from(s: &'static str) -> Self {
s.parse().expect(&format!(
"invalid string literal for {}: '{}'",
stringify!(Self),
s
))
}
}
impl Serialize for Uuid {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
let s: String = self.into();
serializer.serialize_str(&s)
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s: String = self.into();
serializer.serialize_str(&s)
}
}
impl<'a> Deserialize<'a> for Uuid {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
deserializer.deserialize_any(UuidVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
deserializer.deserialize_any(UuidVisitor)
}
}
struct UuidVisitor;
impl<'a> Visitor<'a> for UuidVisitor {
type Value = Uuid;
type Value = Uuid;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid hex-encoded UUID")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid hex-encoded UUID")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
value.parse().map_err(SerdeError::custom)
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
value.parse().map_err(SerdeError::custom)
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: SerdeError,
{
self.visit_str(value.as_ref())
}
}
#[cfg(test)]
mod tests {
use super::Uuid;
use super::Uuid;
#[test]
fn uuid_from_str() {
let uuid: Uuid = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into();
assert_eq!(uuid, Uuid::from([0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, 0xe5, 0xb6]));
}
#[test]
fn uuid_from_str() {
let uuid: Uuid = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into();
assert_eq!(
uuid,
Uuid::from([
0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a,
0xe5, 0xb6
])
);
}
#[test]
fn uuid_from_and_to_str() {
let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6";
let uuid: Uuid = from.into();
let to: String = uuid.into();
assert_eq!(from, &to);
}
#[test]
fn uuid_from_and_to_str() {
let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6";
let uuid: Uuid = from.into();
let to: String = uuid.into();
assert_eq!(from, &to);
}
}
View File
@@ -14,147 +14,173 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use std::num::NonZeroU32;
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::de::{Visitor, Error as SerdeError};
use super::{Error, Bytes};
use super::{Bytes, Error};
use serde::{
de::{Error as SerdeError, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::{fmt, num::NonZeroU32};
#[derive(Debug, PartialEq)]
pub enum KdfSer {
Pbkdf2,
Scrypt,
Pbkdf2,
Scrypt,
}
impl Serialize for KdfSer {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
match *self {
KdfSer::Pbkdf2 => serializer.serialize_str("pbkdf2"),
KdfSer::Scrypt => serializer.serialize_str("scrypt"),
}
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
KdfSer::Pbkdf2 => serializer.serialize_str("pbkdf2"),
KdfSer::Scrypt => serializer.serialize_str("scrypt"),
}
}
}
impl<'a> Deserialize<'a> for KdfSer {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
deserializer.deserialize_any(KdfSerVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
deserializer.deserialize_any(KdfSerVisitor)
}
}
struct KdfSerVisitor;
impl<'a> Visitor<'a> for KdfSerVisitor {
type Value = KdfSer;
type Value = KdfSer;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a kdf algorithm identifier")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a kdf algorithm identifier")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
match value {
"pbkdf2" => Ok(KdfSer::Pbkdf2),
"scrypt" => Ok(KdfSer::Scrypt),
_ => Err(SerdeError::custom(Error::UnsupportedKdf))
}
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
match value {
"pbkdf2" => Ok(KdfSer::Pbkdf2),
"scrypt" => Ok(KdfSer::Scrypt),
_ => Err(SerdeError::custom(Error::UnsupportedKdf)),
}
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: SerdeError,
{
self.visit_str(value.as_ref())
}
}
#[derive(Debug, PartialEq)]
pub enum Prf {
HmacSha256,
HmacSha256,
}
impl Serialize for Prf {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
match *self {
Prf::HmacSha256 => serializer.serialize_str("hmac-sha256"),
}
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
Prf::HmacSha256 => serializer.serialize_str("hmac-sha256"),
}
}
}
impl<'a> Deserialize<'a> for Prf {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
deserializer.deserialize_any(PrfVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
deserializer.deserialize_any(PrfVisitor)
}
}
struct PrfVisitor;
impl<'a> Visitor<'a> for PrfVisitor {
type Value = Prf;
type Value = Prf;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a prf algorithm identifier")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a prf algorithm identifier")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
match value {
"hmac-sha256" => Ok(Prf::HmacSha256),
_ => Err(SerdeError::custom(Error::InvalidPrf)),
}
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
match value {
"hmac-sha256" => Ok(Prf::HmacSha256),
_ => Err(SerdeError::custom(Error::InvalidPrf)),
}
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: SerdeError,
{
self.visit_str(value.as_ref())
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Pbkdf2 {
pub c: NonZeroU32,
pub dklen: u32,
pub prf: Prf,
pub salt: Bytes,
pub c: NonZeroU32,
pub dklen: u32,
pub prf: Prf,
pub salt: Bytes,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Scrypt {
pub dklen: u32,
pub p: u32,
pub n: u32,
pub r: u32,
pub salt: Bytes,
pub dklen: u32,
pub p: u32,
pub n: u32,
pub r: u32,
pub salt: Bytes,
}
#[derive(Debug, PartialEq)]
pub enum KdfSerParams {
Pbkdf2(Pbkdf2),
Scrypt(Scrypt),
Pbkdf2(Pbkdf2),
Scrypt(Scrypt),
}
impl Serialize for KdfSerParams {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
match *self {
KdfSerParams::Pbkdf2(ref params) => params.serialize(serializer),
KdfSerParams::Scrypt(ref params) => params.serialize(serializer),
}
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
KdfSerParams::Pbkdf2(ref params) => params.serialize(serializer),
KdfSerParams::Scrypt(ref params) => params.serialize(serializer),
}
}
}
impl<'a> Deserialize<'a> for KdfSerParams {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
use serde_json::{Value, from_value};
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
use serde_json::{from_value, Value};
let v: Value = Deserialize::deserialize(deserializer)?;
let v: Value = Deserialize::deserialize(deserializer)?;
from_value(v.clone()).map(KdfSerParams::Pbkdf2)
.or_else(|_| from_value(v).map(KdfSerParams::Scrypt))
.map_err(|_| D::Error::custom("Invalid KDF algorithm"))
}
from_value(v.clone())
.map(KdfSerParams::Pbkdf2)
.or_else(|_| from_value(v).map(KdfSerParams::Scrypt))
.map_err(|_| D::Error::custom("Invalid KDF algorithm"))
}
}
#[derive(Debug, PartialEq)]
pub enum Kdf {
Pbkdf2(Pbkdf2),
Scrypt(Scrypt),
Pbkdf2(Pbkdf2),
Scrypt(Scrypt),
}
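A minimal sketch of building pbkdf2 parameters as they would appear under kdfparams in a keystore file, assuming serde_json as used above; the salt is taken from the fixture earlier in this diff.
use std::num::NonZeroU32;

// Sketch only: KdfSerParams serializes as the bare params object; the kdf name is written separately by Crypto.
let params = Pbkdf2 {
    c: NonZeroU32::new(10240).expect("10240 > 0; qed"),
    dklen: 32,
    prf: Prf::HmacSha256,
    salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(),
};
let json = serde_json::to_string(&KdfSerParams::Pbkdf2(params)).expect("serializable");
// json == {"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"b6a9..."}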
View File
@@ -14,182 +14,214 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use std::io::{Read, Write};
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::de::{Error, Visitor, MapAccess, DeserializeOwned};
use super::{Crypto, Uuid, Version, H160};
use serde::{
de::{DeserializeOwned, Error, MapAccess, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use serde_json;
use super::{Uuid, Version, Crypto, H160};
use std::{
fmt,
io::{Read, Write},
};
/// Public opaque type representing serializable `KeyFile`.
#[derive(Debug, PartialEq)]
pub struct OpaqueKeyFile {
key_file: KeyFile
key_file: KeyFile,
}
impl Serialize for OpaqueKeyFile {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where
S: Serializer,
{
self.key_file.serialize(serializer)
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.key_file.serialize(serializer)
}
}
impl<T> From<T> for OpaqueKeyFile where T: Into<KeyFile> {
fn from(val: T) -> Self {
OpaqueKeyFile { key_file: val.into() }
}
impl<T> From<T> for OpaqueKeyFile
where
T: Into<KeyFile>,
{
fn from(val: T) -> Self {
OpaqueKeyFile {
key_file: val.into(),
}
}
}
#[derive(Debug, PartialEq, Serialize)]
pub struct KeyFile {
pub id: Uuid,
pub version: Version,
pub crypto: Crypto,
pub address: Option<H160>,
pub name: Option<String>,
pub meta: Option<String>,
pub id: Uuid,
pub version: Version,
pub crypto: Crypto,
pub address: Option<H160>,
pub name: Option<String>,
pub meta: Option<String>,
}
enum KeyFileField {
Id,
Version,
Crypto,
Address,
Name,
Meta,
Id,
Version,
Crypto,
Address,
Name,
Meta,
}
impl<'a> Deserialize<'a> for KeyFileField {
fn deserialize<D>(deserializer: D) -> Result<KeyFileField, D::Error>
where D: Deserializer<'a>
{
deserializer.deserialize_any(KeyFileFieldVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<KeyFileField, D::Error>
where
D: Deserializer<'a>,
{
deserializer.deserialize_any(KeyFileFieldVisitor)
}
}
struct KeyFileFieldVisitor;
impl<'a> Visitor<'a> for KeyFileFieldVisitor {
type Value = KeyFileField;
type Value = KeyFileField;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid key file field")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid key file field")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where E: Error
{
match value {
"id" => Ok(KeyFileField::Id),
"version" => Ok(KeyFileField::Version),
"crypto" => Ok(KeyFileField::Crypto),
"Crypto" => Ok(KeyFileField::Crypto),
"address" => Ok(KeyFileField::Address),
"name" => Ok(KeyFileField::Name),
"meta" => Ok(KeyFileField::Meta),
_ => Err(Error::custom(format!("Unknown field: '{}'", value))),
}
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: Error,
{
match value {
"id" => Ok(KeyFileField::Id),
"version" => Ok(KeyFileField::Version),
"crypto" => Ok(KeyFileField::Crypto),
"Crypto" => Ok(KeyFileField::Crypto),
"address" => Ok(KeyFileField::Address),
"name" => Ok(KeyFileField::Name),
"meta" => Ok(KeyFileField::Meta),
_ => Err(Error::custom(format!("Unknown field: '{}'", value))),
}
}
}
impl<'a> Deserialize<'a> for KeyFile {
fn deserialize<D>(deserializer: D) -> Result<KeyFile, D::Error>
where D: Deserializer<'a>
{
static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"];
deserializer.deserialize_struct("KeyFile", FIELDS, KeyFileVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<KeyFile, D::Error>
where
D: Deserializer<'a>,
{
static FIELDS: &'static [&'static str] = &["id", "version", "crypto", "Crypto", "address"];
deserializer.deserialize_struct("KeyFile", FIELDS, KeyFileVisitor)
}
}
fn none_if_empty<'a, T>(v: Option<serde_json::Value>) -> Option<T> where
T: DeserializeOwned
fn none_if_empty<'a, T>(v: Option<serde_json::Value>) -> Option<T>
where
T: DeserializeOwned,
{
v.and_then(|v| if v.is_null() {
None
} else {
serde_json::from_value(v).ok()
})
v.and_then(|v| {
if v.is_null() {
None
} else {
serde_json::from_value(v).ok()
}
})
}
struct KeyFileVisitor;
impl<'a> Visitor<'a> for KeyFileVisitor {
type Value = KeyFile;
type Value = KeyFile;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid key object")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid key object")
}
fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
where V: MapAccess<'a>
{
let mut id = None;
let mut version = None;
let mut crypto = None;
let mut address = None;
let mut name = None;
let mut meta = None;
fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
where
V: MapAccess<'a>,
{
let mut id = None;
let mut version = None;
let mut crypto = None;
let mut address = None;
let mut name = None;
let mut meta = None;
loop {
match visitor.next_key()? {
Some(KeyFileField::Id) => { id = Some(visitor.next_value()?); }
Some(KeyFileField::Version) => { version = Some(visitor.next_value()?); }
Some(KeyFileField::Crypto) => { crypto = Some(visitor.next_value()?); }
Some(KeyFileField::Address) => { address = Some(visitor.next_value()?); }
Some(KeyFileField::Name) => { name = none_if_empty(visitor.next_value().ok()) }
Some(KeyFileField::Meta) => { meta = none_if_empty(visitor.next_value().ok()) }
None => { break; }
}
}
loop {
match visitor.next_key()? {
Some(KeyFileField::Id) => {
id = Some(visitor.next_value()?);
}
Some(KeyFileField::Version) => {
version = Some(visitor.next_value()?);
}
Some(KeyFileField::Crypto) => {
crypto = Some(visitor.next_value()?);
}
Some(KeyFileField::Address) => {
address = Some(visitor.next_value()?);
}
Some(KeyFileField::Name) => name = none_if_empty(visitor.next_value().ok()),
Some(KeyFileField::Meta) => meta = none_if_empty(visitor.next_value().ok()),
None => {
break;
}
}
}
let id = match id {
Some(id) => id,
None => return Err(V::Error::missing_field("id")),
};
let id = match id {
Some(id) => id,
None => return Err(V::Error::missing_field("id")),
};
let version = match version {
Some(version) => version,
None => return Err(V::Error::missing_field("version")),
};
let version = match version {
Some(version) => version,
None => return Err(V::Error::missing_field("version")),
};
let crypto = match crypto {
Some(crypto) => crypto,
None => return Err(V::Error::missing_field("crypto")),
};
let crypto = match crypto {
Some(crypto) => crypto,
None => return Err(V::Error::missing_field("crypto")),
};
let result = KeyFile {
id: id,
version: version,
crypto: crypto,
address: address,
name: name,
meta: meta,
};
let result = KeyFile {
id: id,
version: version,
crypto: crypto,
address: address,
name: name,
meta: meta,
};
Ok(result)
}
Ok(result)
}
}
impl KeyFile {
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error> where R: Read {
serde_json::from_reader(reader)
}
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error>
where
R: Read,
{
serde_json::from_reader(reader)
}
pub fn write<W>(&self, writer: &mut W) -> Result<(), serde_json::Error> where W: Write {
serde_json::to_writer(writer, self)
}
pub fn write<W>(&self, writer: &mut W) -> Result<(), serde_json::Error>
where
W: Write,
{
serde_json::to_writer(writer, self)
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use serde_json;
use json::{KeyFile, Uuid, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt};
use json::{Aes128Ctr, Cipher, Crypto, Kdf, KeyFile, Scrypt, Uuid, Version};
use serde_json;
use std::str::FromStr;
#[test]
fn basic_keyfile() {
let json = r#"
#[test]
fn basic_keyfile() {
let json = r#"
{
"address": "6edddfc6349aff20bc6467ccf276c5b52487f7a8",
"crypto": {
@ -214,35 +246,36 @@ mod tests {
"meta": "{}"
}"#;
let expected = KeyFile {
id: Uuid::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(),
version: Version::V3,
address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()),
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}),
ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(),
kdf: Kdf::Scrypt(Scrypt {
n: 262144,
dklen: 32,
p: 1,
r: 8,
salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}),
mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
},
name: Some("Test".to_owned()),
meta: Some("{}".to_owned()),
};
let expected = KeyFile {
id: Uuid::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(),
version: Version::V3,
address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()),
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}),
ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc"
.into(),
kdf: Kdf::Scrypt(Scrypt {
n: 262144,
dklen: 32,
p: 1,
r: 8,
salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}),
mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
},
name: Some("Test".to_owned()),
meta: Some("{}".to_owned()),
};
let keyfile: KeyFile = serde_json::from_str(json).unwrap();
assert_eq!(keyfile, expected);
}
let keyfile: KeyFile = serde_json::from_str(json).unwrap();
assert_eq!(keyfile, expected);
}
#[test]
fn capital_crypto_keyfile() {
let json = r#"
#[test]
fn capital_crypto_keyfile() {
let json = r#"
{
"address": "6edddfc6349aff20bc6467ccf276c5b52487f7a8",
"Crypto": {
@ -265,60 +298,62 @@ mod tests {
"version": 3
}"#;
let expected = KeyFile {
id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(),
version: Version::V3,
address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()),
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}),
ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(),
kdf: Kdf::Scrypt(Scrypt {
n: 262144,
dklen: 32,
p: 1,
r: 8,
salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}),
mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
},
name: None,
meta: None,
};
let expected = KeyFile {
id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(),
version: Version::V3,
address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()),
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}),
ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc"
.into(),
kdf: Kdf::Scrypt(Scrypt {
n: 262144,
dklen: 32,
p: 1,
r: 8,
salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}),
mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
},
name: None,
meta: None,
};
let keyfile: KeyFile = serde_json::from_str(json).unwrap();
assert_eq!(keyfile, expected);
}
let keyfile: KeyFile = serde_json::from_str(json).unwrap();
assert_eq!(keyfile, expected);
}
#[test]
fn to_and_from_json() {
let file = KeyFile {
id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(),
version: Version::V3,
address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()),
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}),
ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(),
kdf: Kdf::Scrypt(Scrypt {
n: 262144,
dklen: 32,
p: 1,
r: 8,
salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}),
mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
},
name: Some("Test".to_owned()),
meta: None,
};
#[test]
fn to_and_from_json() {
let file = KeyFile {
id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(),
version: Version::V3,
address: Some("6edddfc6349aff20bc6467ccf276c5b52487f7a8".into()),
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}),
ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc"
.into(),
kdf: Kdf::Scrypt(Scrypt {
n: 262144,
dklen: 32,
p: 1,
r: 8,
salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}),
mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
},
name: Some("Test".to_owned()),
meta: None,
};
let serialized = serde_json::to_string(&file).unwrap();
println!("{}", serialized);
let deserialized = serde_json::from_str(&serialized).unwrap();
let serialized = serde_json::to_string(&file).unwrap();
println!("{}", serialized);
let deserialized = serde_json::from_str(&serialized).unwrap();
assert_eq!(file, deserialized);
}
assert_eq!(file, deserialized);
}
}
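A minimal sketch (not part of the diff) of the load/write round trip the tests above exercise. It assumes `json::KeyFile` is in scope and that the hypothetical `raw` variable holds a complete V3 keystore JSON string such as the one in `basic_keyfile`.
    let keyfile = KeyFile::load(raw.as_bytes()).expect("valid keystore JSON"); // &[u8] implements Read
    let mut out = Vec::new();
    keyfile.write(&mut out).expect("serializes back to JSON");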

View File

@ -29,15 +29,20 @@ mod vault_file;
mod vault_key_file;
mod version;
pub use self::bytes::Bytes;
pub use self::cipher::{Cipher, CipherSer, CipherSerParams, Aes128Ctr};
pub use self::crypto::{Crypto, CipherText};
pub use self::error::Error;
pub use self::hash::{H128, H160, H256};
pub use self::id::Uuid;
pub use self::kdf::{Kdf, KdfSer, Prf, Pbkdf2, Scrypt, KdfSerParams};
pub use self::key_file::{KeyFile, OpaqueKeyFile};
pub use self::presale::{PresaleWallet, Encseed};
pub use self::vault_file::VaultFile;
pub use self::vault_key_file::{VaultKeyFile, VaultKeyMeta, insert_vault_name_to_json_meta, remove_vault_name_from_json_meta};
pub use self::version::Version;
pub use self::{
bytes::Bytes,
cipher::{Aes128Ctr, Cipher, CipherSer, CipherSerParams},
crypto::{CipherText, Crypto},
error::Error,
hash::{H128, H160, H256},
id::Uuid,
kdf::{Kdf, KdfSer, KdfSerParams, Pbkdf2, Prf, Scrypt},
key_file::{KeyFile, OpaqueKeyFile},
presale::{Encseed, PresaleWallet},
vault_file::VaultFile,
vault_key_file::{
insert_vault_name_to_json_meta, remove_vault_name_from_json_meta, VaultKeyFile,
VaultKeyMeta,
},
version::Version,
};

View File

@ -14,34 +14,37 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::io::Read;
use super::{Bytes, H160};
use serde_json;
use super::{H160, Bytes};
use std::io::Read;
pub type Encseed = Bytes;
#[derive(Debug, PartialEq, Deserialize)]
pub struct PresaleWallet {
pub encseed: Encseed,
#[serde(rename = "ethaddr")]
pub address: H160,
pub encseed: Encseed,
#[serde(rename = "ethaddr")]
pub address: H160,
}
impl PresaleWallet {
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error> where R: Read {
serde_json::from_reader(reader)
}
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error>
where
R: Read,
{
serde_json::from_reader(reader)
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use serde_json;
use json::{PresaleWallet, H160};
use json::{PresaleWallet, H160};
use serde_json;
use std::str::FromStr;
#[test]
fn presale_wallet() {
let json = r#"
#[test]
fn presale_wallet() {
let json = r#"
{
"encseed": "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066",
"ethaddr": "ede84640d1a1d3e06902048e67aa7db8d52c2ce1",
@ -49,18 +52,18 @@ mod tests {
"btcaddr": "1JvqEc6WLhg6GnyrLBe2ztPAU28KRfuseH"
} "#;
let expected = PresaleWallet {
let expected = PresaleWallet {
encseed: "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066".into(),
address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(),
};
let wallet: PresaleWallet = serde_json::from_str(json).unwrap();
assert_eq!(expected, wallet);
}
let wallet: PresaleWallet = serde_json::from_str(json).unwrap();
assert_eq!(expected, wallet);
}
#[test]
fn long_presale_wallet() {
let json = r#"
#[test]
fn long_presale_wallet() {
let json = r#"
{
"encseed":
"137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d",
@ -69,12 +72,12 @@ mod tests {
"btcaddr": "1JvqEc6WLhg6GnyrLBe2ztPAU28KRfuseH"
} "#;
let expected = PresaleWallet {
let expected = PresaleWallet {
encseed: "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d".into(),
address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(),
};
let wallet: PresaleWallet = serde_json::from_str(json).unwrap();
assert_eq!(expected, wallet);
}
let wallet: PresaleWallet = serde_json::from_str(json).unwrap();
assert_eq!(expected, wallet);
}
}
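A hedged usage sketch for the structure above; the hex values are shortened placeholders, not a real wallet.
    let raw = r#"{"encseed": "00aa", "ethaddr": "ede84640d1a1d3e06902048e67aa7db8d52c2ce1"}"#;
    let wallet: PresaleWallet = serde_json::from_str(raw).expect("matches the fields above");
    println!("pre-sale address: {:?}", wallet.address); // `ethaddr` is renamed to `address`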

View File

@ -14,86 +14,92 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::io::{Read, Write};
use serde_json;
use super::Crypto;
use serde_json;
use std::io::{Read, Write};
/// Vault meta file
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultFile {
/// Vault password, encrypted with vault password
pub crypto: Crypto,
/// Vault metadata string
pub meta: Option<String>,
/// Vault password, encrypted with vault password
pub crypto: Crypto,
/// Vault metadata string
pub meta: Option<String>,
}
impl VaultFile {
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error> where R: Read {
serde_json::from_reader(reader)
}
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error>
where
R: Read,
{
serde_json::from_reader(reader)
}
pub fn write<W>(&self, writer: &mut W) -> Result<(), serde_json::Error> where W: Write {
serde_json::to_writer(writer, self)
}
pub fn write<W>(&self, writer: &mut W) -> Result<(), serde_json::Error>
where
W: Write,
{
serde_json::to_writer(writer, self)
}
}
#[cfg(test)]
mod test {
use serde_json;
use json::{VaultFile, Crypto, Cipher, Aes128Ctr, Kdf, Pbkdf2, Prf};
use std::num::NonZeroU32;
use json::{Aes128Ctr, Cipher, Crypto, Kdf, Pbkdf2, Prf, VaultFile};
use serde_json;
use std::num::NonZeroU32;
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed");
}
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(1024).expect("1024 > 0; qed");
}
#[test]
fn to_and_from_json() {
let file = VaultFile {
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "0155e3690be19fbfbecabcd440aa284b".into(),
}),
ciphertext: "4d6938a1f49b7782".into(),
kdf: Kdf::Pbkdf2(Pbkdf2 {
c: *ITERATIONS,
dklen: 32,
prf: Prf::HmacSha256,
salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(),
}),
mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(),
},
meta: Some("{}".into()),
};
#[test]
fn to_and_from_json() {
let file = VaultFile {
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "0155e3690be19fbfbecabcd440aa284b".into(),
}),
ciphertext: "4d6938a1f49b7782".into(),
kdf: Kdf::Pbkdf2(Pbkdf2 {
c: *ITERATIONS,
dklen: 32,
prf: Prf::HmacSha256,
salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(),
}),
mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(),
},
meta: Some("{}".into()),
};
let serialized = serde_json::to_string(&file).unwrap();
let deserialized = serde_json::from_str(&serialized).unwrap();
let serialized = serde_json::to_string(&file).unwrap();
let deserialized = serde_json::from_str(&serialized).unwrap();
assert_eq!(file, deserialized);
}
assert_eq!(file, deserialized);
}
#[test]
fn to_and_from_json_no_meta() {
let file = VaultFile {
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "0155e3690be19fbfbecabcd440aa284b".into(),
}),
ciphertext: "4d6938a1f49b7782".into(),
kdf: Kdf::Pbkdf2(Pbkdf2 {
c: *ITERATIONS,
dklen: 32,
prf: Prf::HmacSha256,
salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(),
}),
mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(),
},
meta: None,
};
#[test]
fn to_and_from_json_no_meta() {
let file = VaultFile {
crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: "0155e3690be19fbfbecabcd440aa284b".into(),
}),
ciphertext: "4d6938a1f49b7782".into(),
kdf: Kdf::Pbkdf2(Pbkdf2 {
c: *ITERATIONS,
dklen: 32,
prf: Prf::HmacSha256,
salt: "b6a9338a7ccd39288a86dba73bfecd9101b4f3db9c9830e7c76afdbd4f6872e5".into(),
}),
mac: "16381463ea11c6eb2239a9f339c2e780516d29d234ce30ac5f166f9080b5a262".into(),
},
meta: None,
};
let serialized = serde_json::to_string(&file).unwrap();
let deserialized = serde_json::from_str(&serialized).unwrap();
let serialized = serde_json::to_string(&file).unwrap();
let deserialized = serde_json::from_str(&serialized).unwrap();
assert_eq!(file, deserialized);
}
assert_eq!(file, deserialized);
}
}

View File

@ -14,12 +14,10 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::io::{Read, Write};
use super::{Crypto, Uuid, Version, H160};
use serde::de::Error;
use serde_json;
use serde_json::value::Value;
use serde_json::error;
use super::{Uuid, Version, Crypto, H160};
use serde_json::{self, error, value::Value};
use std::io::{Read, Write};
/// Meta key name for vault field
const VAULT_NAME_META_KEY: &'static str = "vault";
@ -27,94 +25,112 @@ const VAULT_NAME_META_KEY: &'static str = "vault";
/// Key file as stored in vaults
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultKeyFile {
/// Key id
pub id: Uuid,
/// Key version
pub version: Version,
/// Secret, encrypted with account password
pub crypto: Crypto,
/// Serialized `VaultKeyMeta`, encrypted with vault password
pub metacrypto: Crypto,
/// Key id
pub id: Uuid,
/// Key version
pub version: Version,
/// Secret, encrypted with account password
pub crypto: Crypto,
/// Serialized `VaultKeyMeta`, encrypted with vault password
pub metacrypto: Crypto,
}
/// Data, stored in `VaultKeyFile::metacrypto`
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultKeyMeta {
/// Key address
pub address: H160,
/// Key name
pub name: Option<String>,
/// Key metadata
pub meta: Option<String>,
/// Key address
pub address: H160,
/// Key name
pub name: Option<String>,
/// Key metadata
pub meta: Option<String>,
}
/// Insert vault name to the JSON meta field
pub fn insert_vault_name_to_json_meta(meta: &str, vault_name: &str) -> Result<String, error::Error> {
let mut meta = if meta.is_empty() {
Value::Object(serde_json::Map::new())
} else {
serde_json::from_str(meta)?
};
pub fn insert_vault_name_to_json_meta(
meta: &str,
vault_name: &str,
) -> Result<String, error::Error> {
let mut meta = if meta.is_empty() {
Value::Object(serde_json::Map::new())
} else {
serde_json::from_str(meta)?
};
if let Some(meta_obj) = meta.as_object_mut() {
meta_obj.insert(VAULT_NAME_META_KEY.to_owned(), Value::String(vault_name.to_owned()));
serde_json::to_string(meta_obj)
} else {
Err(error::Error::custom("Meta is expected to be a serialized JSON object"))
}
if let Some(meta_obj) = meta.as_object_mut() {
meta_obj.insert(
VAULT_NAME_META_KEY.to_owned(),
Value::String(vault_name.to_owned()),
);
serde_json::to_string(meta_obj)
} else {
Err(error::Error::custom(
"Meta is expected to be a serialized JSON object",
))
}
}
/// Remove vault name from the JSON meta field
pub fn remove_vault_name_from_json_meta(meta: &str) -> Result<String, error::Error> {
let mut meta = if meta.is_empty() {
Value::Object(serde_json::Map::new())
} else {
serde_json::from_str(meta)?
};
let mut meta = if meta.is_empty() {
Value::Object(serde_json::Map::new())
} else {
serde_json::from_str(meta)?
};
if let Some(meta_obj) = meta.as_object_mut() {
meta_obj.remove(VAULT_NAME_META_KEY);
serde_json::to_string(meta_obj)
} else {
Err(error::Error::custom("Meta is expected to be a serialized JSON object"))
}
if let Some(meta_obj) = meta.as_object_mut() {
meta_obj.remove(VAULT_NAME_META_KEY);
serde_json::to_string(meta_obj)
} else {
Err(error::Error::custom(
"Meta is expected to be a serialized JSON object",
))
}
}
impl VaultKeyFile {
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error> where R: Read {
serde_json::from_reader(reader)
}
pub fn load<R>(reader: R) -> Result<Self, serde_json::Error>
where
R: Read,
{
serde_json::from_reader(reader)
}
pub fn write<W>(&self, writer: &mut W) -> Result<(), serde_json::Error> where W: Write {
serde_json::to_writer(writer, self)
}
pub fn write<W>(&self, writer: &mut W) -> Result<(), serde_json::Error>
where
W: Write,
{
serde_json::to_writer(writer, self)
}
}
impl VaultKeyMeta {
pub fn load(bytes: &[u8]) -> Result<Self, serde_json::Error> {
serde_json::from_slice(&bytes)
}
pub fn load(bytes: &[u8]) -> Result<Self, serde_json::Error> {
serde_json::from_slice(&bytes)
}
pub fn write(&self) -> Result<Vec<u8>, serde_json::Error> {
let s = serde_json::to_string(self)?;
Ok(s.as_bytes().into())
}
pub fn write(&self) -> Result<Vec<u8>, serde_json::Error> {
let s = serde_json::to_string(self)?;
Ok(s.as_bytes().into())
}
}
#[cfg(test)]
mod test {
use serde_json;
use json::{VaultKeyFile, Version, Crypto, Cipher, Aes128Ctr, Kdf, Pbkdf2, Prf,
insert_vault_name_to_json_meta, remove_vault_name_from_json_meta};
use std::num::NonZeroU32;
use json::{
insert_vault_name_to_json_meta, remove_vault_name_from_json_meta, Aes128Ctr, Cipher,
Crypto, Kdf, Pbkdf2, Prf, VaultKeyFile, Version,
};
use serde_json;
use std::num::NonZeroU32;
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed");
}
lazy_static! {
static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed");
}
#[test]
fn to_and_from_json() {
let file = VaultKeyFile {
#[test]
fn to_and_from_json() {
let file = VaultKeyFile {
id: "08d82c39-88e3-7a71-6abb-89c8f36c3ceb".into(),
version: Version::V3,
crypto: Crypto {
@ -145,33 +161,45 @@ mod test {
}
};
let serialized = serde_json::to_string(&file).unwrap();
let deserialized = serde_json::from_str(&serialized).unwrap();
let serialized = serde_json::to_string(&file).unwrap();
let deserialized = serde_json::from_str(&serialized).unwrap();
assert_eq!(file, deserialized);
}
assert_eq!(file, deserialized);
}
#[test]
fn vault_name_inserted_to_json_meta() {
assert_eq!(insert_vault_name_to_json_meta(r#""#, "MyVault").unwrap(), r#"{"vault":"MyVault"}"#);
assert_eq!(insert_vault_name_to_json_meta(r#"{"tags":["kalabala"]}"#, "MyVault").unwrap(), r#"{"tags":["kalabala"],"vault":"MyVault"}"#);
}
#[test]
fn vault_name_inserted_to_json_meta() {
assert_eq!(
insert_vault_name_to_json_meta(r#""#, "MyVault").unwrap(),
r#"{"vault":"MyVault"}"#
);
assert_eq!(
insert_vault_name_to_json_meta(r#"{"tags":["kalabala"]}"#, "MyVault").unwrap(),
r#"{"tags":["kalabala"],"vault":"MyVault"}"#
);
}
#[test]
fn vault_name_not_inserted_to_json_meta() {
assert!(insert_vault_name_to_json_meta(r#"///3533"#, "MyVault").is_err());
assert!(insert_vault_name_to_json_meta(r#""string""#, "MyVault").is_err());
}
#[test]
fn vault_name_not_inserted_to_json_meta() {
assert!(insert_vault_name_to_json_meta(r#"///3533"#, "MyVault").is_err());
assert!(insert_vault_name_to_json_meta(r#""string""#, "MyVault").is_err());
}
#[test]
fn vault_name_removed_from_json_meta() {
assert_eq!(remove_vault_name_from_json_meta(r#"{"vault":"MyVault"}"#).unwrap(), r#"{}"#);
assert_eq!(remove_vault_name_from_json_meta(r#"{"tags":["kalabala"],"vault":"MyVault"}"#).unwrap(), r#"{"tags":["kalabala"]}"#);
}
#[test]
fn vault_name_removed_from_json_meta() {
assert_eq!(
remove_vault_name_from_json_meta(r#"{"vault":"MyVault"}"#).unwrap(),
r#"{}"#
);
assert_eq!(
remove_vault_name_from_json_meta(r#"{"tags":["kalabala"],"vault":"MyVault"}"#).unwrap(),
r#"{"tags":["kalabala"]}"#
);
}
#[test]
fn vault_name_not_removed_from_json_meta() {
assert!(remove_vault_name_from_json_meta(r#"///3533"#).is_err());
assert!(remove_vault_name_from_json_meta(r#""string""#).is_err());
}
#[test]
fn vault_name_not_removed_from_json_meta() {
assert!(remove_vault_name_from_json_meta(r#"///3533"#).is_err());
assert!(remove_vault_name_from_json_meta(r#""string""#).is_err());
}
}
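One piece of this file the tests above do not cover is the byte-oriented `VaultKeyMeta` round trip; a minimal sketch with placeholder values:
    let meta = VaultKeyMeta {
        address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(),
        name: Some("Test".to_owned()),
        meta: None,
    };
    let bytes = meta.write().expect("serializes to JSON bytes");
    assert_eq!(VaultKeyMeta::load(&bytes).expect("parses back"), meta);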

View File

@ -14,45 +14,54 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::de::{Error as SerdeError, Visitor};
use super::Error;
use serde::{
de::{Error as SerdeError, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::fmt;
#[derive(Debug, PartialEq)]
pub enum Version {
V3,
V3,
}
impl Serialize for Version {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
match *self {
Version::V3 => serializer.serialize_u64(3)
}
}
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
Version::V3 => serializer.serialize_u64(3),
}
}
}
impl<'a> Deserialize<'a> for Version {
fn deserialize<D>(deserializer: D) -> Result<Version, D::Error>
where D: Deserializer<'a> {
deserializer.deserialize_any(VersionVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<Version, D::Error>
where
D: Deserializer<'a>,
{
deserializer.deserialize_any(VersionVisitor)
}
}
struct VersionVisitor;
impl<'a> Visitor<'a> for VersionVisitor {
type Value = Version;
type Value = Version;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid key version identifier")
}
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a valid key version identifier")
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E> where E: SerdeError {
match value {
3 => Ok(Version::V3),
_ => Err(SerdeError::custom(Error::UnsupportedVersion))
}
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: SerdeError,
{
match value {
3 => Ok(Version::V3),
_ => Err(SerdeError::custom(Error::UnsupportedVersion)),
}
}
}
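In effect, the custom impls above map `Version::V3` to the bare JSON number 3 and reject anything else; a quick sketch:
    assert_eq!(serde_json::to_string(&Version::V3).unwrap(), "3");
    let v: Version = serde_json::from_str("3").expect("3 is the only supported version");
    assert_eq!(v, Version::V3);
    assert!(serde_json::from_str::<Version>("2").is_err()); // UnsupportedVersion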

View File

@ -27,13 +27,13 @@ extern crate rustc_hex;
extern crate serde;
extern crate serde_json;
extern crate smallvec;
extern crate tempdir;
extern crate time;
extern crate tiny_keccak;
extern crate tempdir;
extern crate parity_crypto as crypto;
extern crate ethereum_types;
extern crate ethkey as _ethkey;
extern crate parity_crypto as crypto;
extern crate parity_wordlist;
#[macro_use]
@ -60,18 +60,20 @@ mod presale;
mod random;
mod secret_store;
pub use self::account::{SafeAccount, Crypto};
pub use self::error::Error;
pub use self::ethstore::{EthStore, EthMultiStore};
pub use self::import::{import_account, import_accounts, read_geth_accounts};
pub use self::json::OpaqueKeyFile as KeyFile;
pub use self::presale::PresaleWallet;
pub use self::secret_store::{
SecretVaultRef, StoreAccountRef, SimpleSecretStore, SecretStore,
Derivation, IndexDerivation,
pub use self::{
account::{Crypto, SafeAccount},
error::Error,
ethstore::{EthMultiStore, EthStore},
import::{import_account, import_accounts, read_geth_accounts},
json::OpaqueKeyFile as KeyFile,
parity_wordlist::random_phrase,
presale::PresaleWallet,
random::random_string,
secret_store::{
Derivation, IndexDerivation, SecretStore, SecretVaultRef, SimpleSecretStore,
StoreAccountRef,
},
};
pub use self::random::random_string;
pub use self::parity_wordlist::random_phrase;
/// An opaque wrapper for secret.
pub struct OpaqueSecret(::ethkey::Secret);

View File

@ -14,78 +14,80 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fs;
use std::num::NonZeroU32;
use std::path::Path;
use crypto::{self, pbkdf2, Keccak256};
use ethkey::{Address, KeyPair, Password, Secret};
use json;
use ethkey::{Address, Secret, KeyPair, Password};
use crypto::{Keccak256, pbkdf2};
use {crypto, Error};
use std::{fs, num::NonZeroU32, path::Path};
use Error;
/// Pre-sale wallet.
pub struct PresaleWallet {
iv: [u8; 16],
ciphertext: Vec<u8>,
address: Address,
iv: [u8; 16],
ciphertext: Vec<u8>,
address: Address,
}
impl From<json::PresaleWallet> for PresaleWallet {
fn from(wallet: json::PresaleWallet) -> Self {
let mut iv = [0u8; 16];
iv.copy_from_slice(&wallet.encseed[..16]);
fn from(wallet: json::PresaleWallet) -> Self {
let mut iv = [0u8; 16];
iv.copy_from_slice(&wallet.encseed[..16]);
let mut ciphertext = vec![];
ciphertext.extend_from_slice(&wallet.encseed[16..]);
let mut ciphertext = vec![];
ciphertext.extend_from_slice(&wallet.encseed[16..]);
PresaleWallet {
iv: iv,
ciphertext: ciphertext,
address: Address::from(wallet.address),
}
}
PresaleWallet {
iv: iv,
ciphertext: ciphertext,
address: Address::from(wallet.address),
}
}
}
impl PresaleWallet {
/// Open a pre-sale wallet.
pub fn open<P>(path: P) -> Result<Self, Error> where P: AsRef<Path> {
let file = fs::File::open(path)?;
let presale = json::PresaleWallet::load(file)
.map_err(|e| Error::InvalidKeyFile(format!("{}", e)))?;
Ok(PresaleWallet::from(presale))
}
/// Open a pre-sale wallet.
pub fn open<P>(path: P) -> Result<Self, Error>
where
P: AsRef<Path>,
{
let file = fs::File::open(path)?;
let presale =
json::PresaleWallet::load(file).map_err(|e| Error::InvalidKeyFile(format!("{}", e)))?;
Ok(PresaleWallet::from(presale))
}
/// Decrypt the wallet.
pub fn decrypt(&self, password: &Password) -> Result<KeyPair, Error> {
let mut derived_key = [0u8; 32];
let salt = pbkdf2::Salt(password.as_bytes());
let sec = pbkdf2::Secret(password.as_bytes());
let iter = NonZeroU32::new(2000).expect("2000 > 0; qed");
pbkdf2::sha256(iter, salt, sec, &mut derived_key);
/// Decrypt the wallet.
pub fn decrypt(&self, password: &Password) -> Result<KeyPair, Error> {
let mut derived_key = [0u8; 32];
let salt = pbkdf2::Salt(password.as_bytes());
let sec = pbkdf2::Secret(password.as_bytes());
let iter = NonZeroU32::new(2000).expect("2000 > 0; qed");
pbkdf2::sha256(iter, salt, sec, &mut derived_key);
let mut key = vec![0; self.ciphertext.len()];
let len = crypto::aes::decrypt_128_cbc(&derived_key[0..16], &self.iv, &self.ciphertext, &mut key)
.map_err(|_| Error::InvalidPassword)?;
let unpadded = &key[..len];
let mut key = vec![0; self.ciphertext.len()];
let len =
crypto::aes::decrypt_128_cbc(&derived_key[0..16], &self.iv, &self.ciphertext, &mut key)
.map_err(|_| Error::InvalidPassword)?;
let unpadded = &key[..len];
let secret = Secret::from_unsafe_slice(&unpadded.keccak256())?;
if let Ok(kp) = KeyPair::from_secret(secret) {
if kp.address() == self.address {
return Ok(kp)
}
}
let secret = Secret::from_unsafe_slice(&unpadded.keccak256())?;
if let Ok(kp) = KeyPair::from_secret(secret) {
if kp.address() == self.address {
return Ok(kp);
}
}
Err(Error::InvalidPassword)
}
Err(Error::InvalidPassword)
}
}
#[cfg(test)]
mod tests {
use super::PresaleWallet;
use json;
use super::PresaleWallet;
use json;
#[test]
fn test() {
let json = r#"
#[test]
fn test() {
let json = r#"
{
"encseed": "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066",
"ethaddr": "ede84640d1a1d3e06902048e67aa7db8d52c2ce1",
@ -93,9 +95,9 @@ mod tests {
"btcaddr": "1JvqEc6WLhg6GnyrLBe2ztPAU28KRfuseH"
} "#;
let wallet = json::PresaleWallet::load(json.as_bytes()).unwrap();
let wallet = PresaleWallet::from(wallet);
assert!(wallet.decrypt(&"123".into()).is_ok());
assert!(wallet.decrypt(&"124".into()).is_err());
}
let wallet = json::PresaleWallet::load(json.as_bytes()).unwrap();
let wallet = PresaleWallet::from(wallet);
assert!(wallet.decrypt(&"123".into()).is_ok());
assert!(wallet.decrypt(&"124".into()).is_err());
}
}
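For orientation, a hedged sketch of the open/decrypt flow above; the file path and password are placeholders.
    let wallet = PresaleWallet::open("presale-wallet.json").expect("readable pre-sale file");
    match wallet.decrypt(&"password".into()) {
        Ok(kp) => println!("recovered key pair for {:?}", kp.address()),
        Err(_) => println!("wrong password"),
    }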

View File

@ -14,32 +14,34 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use rand::{Rng, OsRng};
use rand::{OsRng, Rng};
pub trait Random {
fn random() -> Self where Self: Sized;
fn random() -> Self
where
Self: Sized;
}
impl Random for [u8; 16] {
fn random() -> Self {
let mut result = [0u8; 16];
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut result);
result
}
fn random() -> Self {
let mut result = [0u8; 16];
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut result);
result
}
}
impl Random for [u8; 32] {
fn random() -> Self {
let mut result = [0u8; 32];
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut result);
result
}
fn random() -> Self {
let mut result = [0u8; 32];
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut result);
result
}
}
/// Generate a random string of given length.
pub fn random_string(length: usize) -> String {
let mut rng = OsRng::new().expect("Not able to operate without random source.");
rng.gen_ascii_chars().take(length).collect()
let mut rng = OsRng::new().expect("Not able to operate without random source.");
rng.gen_ascii_chars().take(length).collect()
}
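Usage is straightforward; a small sketch of the helpers above (both draw from `OsRng`):
    let iv = <[u8; 16]>::random();   // e.g. a fresh AES-128-CTR IV
    let salt = <[u8; 32]>::random(); // e.g. a fresh KDF salt
    let token = random_string(12);   // 12 random ASCII characters
    assert_eq!(token.len(), 12);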

View File

@ -14,177 +14,264 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::hash::{Hash, Hasher};
use std::path::PathBuf;
use std::cmp::Ordering;
use ethkey::{Address, Message, Signature, Secret, Password, Public};
use Error;
use json::{Uuid, OpaqueKeyFile};
use ethereum_types::H256;
use ethkey::{Address, Message, Password, Public, Secret, Signature};
use json::{OpaqueKeyFile, Uuid};
use std::{
cmp::Ordering,
hash::{Hash, Hasher},
path::PathBuf,
};
use Error;
use OpaqueSecret;
/// Key directory reference
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum SecretVaultRef {
/// Reference to key in root directory
Root,
    /// Reference to key in specific vault
Vault(String),
/// Reference to key in root directory
Root,
    /// Reference to key in specific vault
Vault(String),
}
/// Stored account reference
#[derive(Debug, Clone, PartialEq, Eq, Ord)]
pub struct StoreAccountRef {
/// Account address
pub address: Address,
/// Vault reference
pub vault: SecretVaultRef,
/// Account address
pub address: Address,
/// Vault reference
pub vault: SecretVaultRef,
}
impl PartialOrd for StoreAccountRef {
fn partial_cmp(&self, other: &StoreAccountRef) -> Option<Ordering> {
Some(self.address.cmp(&other.address).then_with(|| self.vault.cmp(&other.vault)))
}
fn partial_cmp(&self, other: &StoreAccountRef) -> Option<Ordering> {
Some(
self.address
.cmp(&other.address)
.then_with(|| self.vault.cmp(&other.vault)),
)
}
}
impl ::std::borrow::Borrow<Address> for StoreAccountRef {
fn borrow(&self) -> &Address {
&self.address
}
fn borrow(&self) -> &Address {
&self.address
}
}
/// Simple Secret Store API
pub trait SimpleSecretStore: Send + Sync {
    /// Inserts a new account into the store (or vault) with the given password.
fn insert_account(&self, vault: SecretVaultRef, secret: Secret, password: &Password) -> Result<StoreAccountRef, Error>;
/// Inserts new derived account to the store (or vault) with given password.
fn insert_derived(&self, vault: SecretVaultRef, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) -> Result<StoreAccountRef, Error>;
/// Changes accounts password.
fn change_password(&self, account: &StoreAccountRef, old_password: &Password, new_password: &Password) -> Result<(), Error>;
/// Exports key details for account.
fn export_account(&self, account: &StoreAccountRef, password: &Password) -> Result<OpaqueKeyFile, Error>;
/// Entirely removes account from the store and underlying storage.
fn remove_account(&self, account: &StoreAccountRef, password: &Password) -> Result<(), Error>;
/// Generates new derived account.
fn generate_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation) -> Result<Address, Error>;
/// Sign a message with given account.
fn sign(&self, account: &StoreAccountRef, password: &Password, message: &Message) -> Result<Signature, Error>;
/// Sign a message with derived account.
fn sign_derived(&self, account_ref: &StoreAccountRef, password: &Password, derivation: Derivation, message: &Message) -> Result<Signature, Error>;
    /// Decrypt a message with given account.
fn decrypt(&self, account: &StoreAccountRef, password: &Password, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error>;
/// Agree on shared key.
fn agree(&self, account: &StoreAccountRef, password: &Password, other: &Public) -> Result<Secret, Error>;
    /// Inserts a new account into the store (or vault) with the given password.
fn insert_account(
&self,
vault: SecretVaultRef,
secret: Secret,
password: &Password,
) -> Result<StoreAccountRef, Error>;
/// Inserts new derived account to the store (or vault) with given password.
fn insert_derived(
&self,
vault: SecretVaultRef,
account_ref: &StoreAccountRef,
password: &Password,
derivation: Derivation,
) -> Result<StoreAccountRef, Error>;
/// Changes accounts password.
fn change_password(
&self,
account: &StoreAccountRef,
old_password: &Password,
new_password: &Password,
) -> Result<(), Error>;
/// Exports key details for account.
fn export_account(
&self,
account: &StoreAccountRef,
password: &Password,
) -> Result<OpaqueKeyFile, Error>;
/// Entirely removes account from the store and underlying storage.
fn remove_account(&self, account: &StoreAccountRef, password: &Password) -> Result<(), Error>;
/// Generates new derived account.
fn generate_derived(
&self,
account_ref: &StoreAccountRef,
password: &Password,
derivation: Derivation,
) -> Result<Address, Error>;
/// Sign a message with given account.
fn sign(
&self,
account: &StoreAccountRef,
password: &Password,
message: &Message,
) -> Result<Signature, Error>;
/// Sign a message with derived account.
fn sign_derived(
&self,
account_ref: &StoreAccountRef,
password: &Password,
derivation: Derivation,
message: &Message,
) -> Result<Signature, Error>;
    /// Decrypt a message with given account.
fn decrypt(
&self,
account: &StoreAccountRef,
password: &Password,
shared_mac: &[u8],
message: &[u8],
) -> Result<Vec<u8>, Error>;
/// Agree on shared key.
fn agree(
&self,
account: &StoreAccountRef,
password: &Password,
other: &Public,
) -> Result<Secret, Error>;
/// Returns all accounts in this secret store.
fn accounts(&self) -> Result<Vec<StoreAccountRef>, Error>;
/// Get reference to some account with given address.
    /// This method could be removed if we guarantee that there is at most one account for a given address.
fn account_ref(&self, address: &Address) -> Result<StoreAccountRef, Error>;
/// Returns all accounts in this secret store.
fn accounts(&self) -> Result<Vec<StoreAccountRef>, Error>;
/// Get reference to some account with given address.
    /// This method could be removed if we guarantee that there is at most one account for a given address.
fn account_ref(&self, address: &Address) -> Result<StoreAccountRef, Error>;
/// Create new vault with given password
fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error>;
/// Open vault with given password
fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error>;
/// Close vault
fn close_vault(&self, name: &str) -> Result<(), Error>;
/// List all vaults
fn list_vaults(&self) -> Result<Vec<String>, Error>;
/// List all currently opened vaults
fn list_opened_vaults(&self) -> Result<Vec<String>, Error>;
/// Change vault password
fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error>;
    /// Change account's vault
fn change_account_vault(&self, vault: SecretVaultRef, account: StoreAccountRef) -> Result<StoreAccountRef, Error>;
/// Get vault metadata string.
fn get_vault_meta(&self, name: &str) -> Result<String, Error>;
/// Set vault metadata string.
fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error>;
/// Create new vault with given password
fn create_vault(&self, name: &str, password: &Password) -> Result<(), Error>;
/// Open vault with given password
fn open_vault(&self, name: &str, password: &Password) -> Result<(), Error>;
/// Close vault
fn close_vault(&self, name: &str) -> Result<(), Error>;
/// List all vaults
fn list_vaults(&self) -> Result<Vec<String>, Error>;
/// List all currently opened vaults
fn list_opened_vaults(&self) -> Result<Vec<String>, Error>;
/// Change vault password
fn change_vault_password(&self, name: &str, new_password: &Password) -> Result<(), Error>;
    /// Change account's vault
fn change_account_vault(
&self,
vault: SecretVaultRef,
account: StoreAccountRef,
) -> Result<StoreAccountRef, Error>;
/// Get vault metadata string.
fn get_vault_meta(&self, name: &str) -> Result<String, Error>;
/// Set vault metadata string.
fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error>;
}
/// Secret Store API
pub trait SecretStore: SimpleSecretStore {
/// Returns a raw opaque Secret that can be later used to sign a message.
fn raw_secret(
&self,
account: &StoreAccountRef,
password: &Password,
) -> Result<OpaqueSecret, Error>;
/// Returns a raw opaque Secret that can be later used to sign a message.
fn raw_secret(&self, account: &StoreAccountRef, password: &Password) -> Result<OpaqueSecret, Error>;
/// Signs a message with raw secret.
fn sign_with_secret(
&self,
secret: &OpaqueSecret,
message: &Message,
) -> Result<Signature, Error> {
Ok(::ethkey::sign(&secret.0, message)?)
}
/// Signs a message with raw secret.
fn sign_with_secret(&self, secret: &OpaqueSecret, message: &Message) -> Result<Signature, Error> {
Ok(::ethkey::sign(&secret.0, message)?)
}
/// Imports presale wallet
fn import_presale(
&self,
vault: SecretVaultRef,
json: &[u8],
password: &Password,
) -> Result<StoreAccountRef, Error>;
/// Imports existing JSON wallet
fn import_wallet(
&self,
vault: SecretVaultRef,
json: &[u8],
password: &Password,
gen_id: bool,
) -> Result<StoreAccountRef, Error>;
/// Copies account between stores and vaults.
fn copy_account(
&self,
new_store: &SimpleSecretStore,
new_vault: SecretVaultRef,
account: &StoreAccountRef,
password: &Password,
new_password: &Password,
) -> Result<(), Error>;
/// Checks if password matches given account.
fn test_password(&self, account: &StoreAccountRef, password: &Password) -> Result<bool, Error>;
/// Imports presale wallet
fn import_presale(&self, vault: SecretVaultRef, json: &[u8], password: &Password) -> Result<StoreAccountRef, Error>;
/// Imports existing JSON wallet
fn import_wallet(&self, vault: SecretVaultRef, json: &[u8], password: &Password, gen_id: bool) -> Result<StoreAccountRef, Error>;
/// Copies account between stores and vaults.
fn copy_account(&self, new_store: &SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &Password, new_password: &Password) -> Result<(), Error>;
/// Checks if password matches given account.
fn test_password(&self, account: &StoreAccountRef, password: &Password) -> Result<bool, Error>;
/// Returns a public key for given account.
fn public(&self, account: &StoreAccountRef, password: &Password) -> Result<Public, Error>;
/// Returns a public key for given account.
fn public(&self, account: &StoreAccountRef, password: &Password) -> Result<Public, Error>;
/// Returns uuid of an account.
fn uuid(&self, account: &StoreAccountRef) -> Result<Uuid, Error>;
/// Returns account's name.
fn name(&self, account: &StoreAccountRef) -> Result<String, Error>;
/// Returns account's metadata.
fn meta(&self, account: &StoreAccountRef) -> Result<String, Error>;
/// Returns uuid of an account.
fn uuid(&self, account: &StoreAccountRef) -> Result<Uuid, Error>;
/// Returns account's name.
fn name(&self, account: &StoreAccountRef) -> Result<String, Error>;
/// Returns account's metadata.
fn meta(&self, account: &StoreAccountRef) -> Result<String, Error>;
/// Modifies account metadata.
fn set_name(&self, account: &StoreAccountRef, name: String) -> Result<(), Error>;
/// Modifies account name.
fn set_meta(&self, account: &StoreAccountRef, meta: String) -> Result<(), Error>;
/// Modifies account metadata.
fn set_name(&self, account: &StoreAccountRef, name: String) -> Result<(), Error>;
/// Modifies account name.
fn set_meta(&self, account: &StoreAccountRef, meta: String) -> Result<(), Error>;
/// Returns local path of the store.
fn local_path(&self) -> PathBuf;
/// Lists all found geth accounts.
fn list_geth_accounts(&self, testnet: bool) -> Vec<Address>;
/// Imports geth accounts to the store/vault.
fn import_geth_accounts(&self, vault: SecretVaultRef, desired: Vec<Address>, testnet: bool) -> Result<Vec<StoreAccountRef>, Error>;
/// Returns local path of the store.
fn local_path(&self) -> PathBuf;
/// Lists all found geth accounts.
fn list_geth_accounts(&self, testnet: bool) -> Vec<Address>;
/// Imports geth accounts to the store/vault.
fn import_geth_accounts(
&self,
vault: SecretVaultRef,
desired: Vec<Address>,
testnet: bool,
) -> Result<Vec<StoreAccountRef>, Error>;
}
impl StoreAccountRef {
/// Create reference to root account with given address
pub fn root(address: Address) -> Self {
StoreAccountRef::new(SecretVaultRef::Root, address)
}
/// Create reference to root account with given address
pub fn root(address: Address) -> Self {
StoreAccountRef::new(SecretVaultRef::Root, address)
}
/// Create reference to vault account with given address
pub fn vault(vault_name: &str, address: Address) -> Self {
StoreAccountRef::new(SecretVaultRef::Vault(vault_name.to_owned()), address)
}
/// Create reference to vault account with given address
pub fn vault(vault_name: &str, address: Address) -> Self {
StoreAccountRef::new(SecretVaultRef::Vault(vault_name.to_owned()), address)
}
/// Create new account reference
pub fn new(vault_ref: SecretVaultRef, address: Address) -> Self {
StoreAccountRef {
vault: vault_ref,
address: address,
}
}
/// Create new account reference
pub fn new(vault_ref: SecretVaultRef, address: Address) -> Self {
StoreAccountRef {
vault: vault_ref,
address: address,
}
}
}
impl Hash for StoreAccountRef {
fn hash<H: Hasher>(&self, state: &mut H) {
self.address.hash(state);
}
fn hash<H: Hasher>(&self, state: &mut H) {
self.address.hash(state);
}
}
/// Node in hierarchical derivation.
pub struct IndexDerivation {
/// Node is soft (allows proof of parent from parent node).
pub soft: bool,
/// Index sequence of the node.
pub index: u32,
/// Node is soft (allows proof of parent from parent node).
pub soft: bool,
/// Index sequence of the node.
pub index: u32,
}
/// Derivation scheme for keys
pub enum Derivation {
/// Hierarchical derivation
Hierarchical(Vec<IndexDerivation>),
/// Hash derivation, soft.
SoftHash(H256),
/// Hash derivation, hard.
HardHash(H256),
/// Hierarchical derivation
Hierarchical(Vec<IndexDerivation>),
/// Hash derivation, soft.
SoftHash(H256),
/// Hash derivation, hard.
HardHash(H256),
}
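The `Derivation` variants above are easiest to read with a concrete value in mind; a sketch (indices are illustrative):
    let path = Derivation::Hierarchical(vec![
        IndexDerivation { soft: false, index: 0 }, // hardened node
        IndexDerivation { soft: true, index: 1 },  // soft node
    ]);
    // A store implementing `SimpleSecretStore` (e.g. `EthStore`) would accept this
    // in `insert_derived`, `generate_derived` or `sign_derived`.
    let _ = path;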

View File

@ -14,141 +14,184 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
extern crate rand;
extern crate ethstore;
extern crate rand;
mod util;
use ethstore::{EthStore, SimpleSecretStore, SecretVaultRef, StoreAccountRef};
use ethstore::ethkey::{Random, Generator, Secret, KeyPair, verify_address};
use ethstore::accounts_dir::RootDiskDirectory;
use ethstore::{
accounts_dir::RootDiskDirectory,
ethkey::{verify_address, Generator, KeyPair, Random, Secret},
EthStore, SecretVaultRef, SimpleSecretStore, StoreAccountRef,
};
use util::TransientDir;
#[test]
fn secret_store_create() {
let dir = TransientDir::create().unwrap();
let _ = EthStore::open(Box::new(dir)).unwrap();
let dir = TransientDir::create().unwrap();
let _ = EthStore::open(Box::new(dir)).unwrap();
}
#[test]
#[should_panic]
fn secret_store_open_not_existing() {
let dir = TransientDir::open();
let _ = EthStore::open(Box::new(dir)).unwrap();
let dir = TransientDir::open();
let _ = EthStore::open(Box::new(dir)).unwrap();
}
fn random_secret() -> Secret {
Random.generate().unwrap().secret().clone()
Random.generate().unwrap().secret().clone()
}
#[test]
fn secret_store_create_account() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts().unwrap().len(), 0);
assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok());
assert_eq!(store.accounts().unwrap().len(), 1);
assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok());
assert_eq!(store.accounts().unwrap().len(), 2);
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts().unwrap().len(), 0);
assert!(store
.insert_account(SecretVaultRef::Root, random_secret(), &"".into())
.is_ok());
assert_eq!(store.accounts().unwrap().len(), 1);
assert!(store
.insert_account(SecretVaultRef::Root, random_secret(), &"".into())
.is_ok());
assert_eq!(store.accounts().unwrap().len(), 2);
}
#[test]
fn secret_store_sign() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok());
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store.sign(&accounts[0], &"".into(), &Default::default()).is_ok());
assert!(store.sign(&accounts[0], &"1".into(), &Default::default()).is_err());
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store
.insert_account(SecretVaultRef::Root, random_secret(), &"".into())
.is_ok());
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store
.sign(&accounts[0], &"".into(), &Default::default())
.is_ok());
assert!(store
.sign(&accounts[0], &"1".into(), &Default::default())
.is_err());
}
#[test]
fn secret_store_change_password() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok());
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store.sign(&accounts[0], &"".into(), &Default::default()).is_ok());
assert!(store.change_password(&accounts[0], &"".into(), &"1".into()).is_ok());
assert!(store.sign(&accounts[0], &"".into(), &Default::default()).is_err());
assert!(store.sign(&accounts[0], &"1".into(), &Default::default()).is_ok());
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store
.insert_account(SecretVaultRef::Root, random_secret(), &"".into())
.is_ok());
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store
.sign(&accounts[0], &"".into(), &Default::default())
.is_ok());
assert!(store
.change_password(&accounts[0], &"".into(), &"1".into())
.is_ok());
assert!(store
.sign(&accounts[0], &"".into(), &Default::default())
.is_err());
assert!(store
.sign(&accounts[0], &"1".into(), &Default::default())
.is_ok());
}
#[test]
fn secret_store_remove_account() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store.insert_account(SecretVaultRef::Root, random_secret(), &"".into()).is_ok());
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store.remove_account(&accounts[0], &"".into()).is_ok());
assert_eq!(store.accounts().unwrap().len(), 0);
assert!(store.remove_account(&accounts[0], &"".into()).is_err());
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store
.insert_account(SecretVaultRef::Root, random_secret(), &"".into())
.is_ok());
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store.remove_account(&accounts[0], &"".into()).is_ok());
assert_eq!(store.accounts().unwrap().len(), 0);
assert!(store.remove_account(&accounts[0], &"".into()).is_err());
}
fn test_path() -> &'static str {
match ::std::fs::metadata("ethstore") {
Ok(_) => "ethstore/tests/res/geth_keystore",
Err(_) => "tests/res/geth_keystore",
}
match ::std::fs::metadata("ethstore") {
Ok(_) => "ethstore/tests/res/geth_keystore",
Err(_) => "tests/res/geth_keystore",
}
}
fn pat_path() -> &'static str {
match ::std::fs::metadata("ethstore") {
Ok(_) => "ethstore/tests/res/pat",
Err(_) => "tests/res/pat",
}
match ::std::fs::metadata("ethstore") {
Ok(_) => "ethstore/tests/res/pat",
Err(_) => "tests/res/pat",
}
}
fn ciphertext_path() -> &'static str {
match ::std::fs::metadata("ethstore") {
Ok(_) => "ethstore/tests/res/ciphertext",
Err(_) => "tests/res/ciphertext",
}
match ::std::fs::metadata("ethstore") {
Ok(_) => "ethstore/tests/res/ciphertext",
Err(_) => "tests/res/ciphertext",
}
}
#[test]
fn secret_store_laod_geth_files() {
let dir = RootDiskDirectory::at(test_path());
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts().unwrap(), vec![
StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()),
StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()),
StoreAccountRef::root("63121b431a52f8043c16fcf0d1df9cb7b5f66649".into()),
]);
let dir = RootDiskDirectory::at(test_path());
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(
store.accounts().unwrap(),
vec![
StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()),
StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()),
StoreAccountRef::root("63121b431a52f8043c16fcf0d1df9cb7b5f66649".into()),
]
);
}
#[test]
fn secret_store_load_pat_files() {
let dir = RootDiskDirectory::at(pat_path());
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts().unwrap(), vec![
StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()),
StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()),
]);
let dir = RootDiskDirectory::at(pat_path());
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(
store.accounts().unwrap(),
vec![
StoreAccountRef::root("3f49624084b67849c7b4e805c5988c21a430f9d9".into()),
StoreAccountRef::root("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into()),
]
);
}
#[test]
fn test_decrypting_files_with_short_ciphertext() {
// 31e9d1e6d844bd3a536800ef8d8be6a9975db509, 30
let kp1 = KeyPair::from_secret("000081c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018".parse().unwrap()).unwrap();
// d1e64e5480bfaf733ba7d48712decb8227797a4e , 31
let kp2 = KeyPair::from_secret("00fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35".parse().unwrap()).unwrap();
let dir = RootDiskDirectory::at(ciphertext_path());
let store = EthStore::open(Box::new(dir)).unwrap();
let accounts = store.accounts().unwrap();
assert_eq!(accounts, vec![
StoreAccountRef::root("31e9d1e6d844bd3a536800ef8d8be6a9975db509".into()),
StoreAccountRef::root("d1e64e5480bfaf733ba7d48712decb8227797a4e".into()),
]);
// 31e9d1e6d844bd3a536800ef8d8be6a9975db509, 30
let kp1 = KeyPair::from_secret(
"000081c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018"
.parse()
.unwrap(),
)
.unwrap();
// d1e64e5480bfaf733ba7d48712decb8227797a4e , 31
let kp2 = KeyPair::from_secret(
"00fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35"
.parse()
.unwrap(),
)
.unwrap();
let dir = RootDiskDirectory::at(ciphertext_path());
let store = EthStore::open(Box::new(dir)).unwrap();
let accounts = store.accounts().unwrap();
assert_eq!(
accounts,
vec![
StoreAccountRef::root("31e9d1e6d844bd3a536800ef8d8be6a9975db509".into()),
StoreAccountRef::root("d1e64e5480bfaf733ba7d48712decb8227797a4e".into()),
]
);
let message = Default::default();
let message = Default::default();
let s1 = store.sign(&accounts[0], &"foo".into(), &message).unwrap();
let s2 = store.sign(&accounts[1], &"foo".into(), &message).unwrap();
assert!(verify_address(&accounts[0].address, &s1, &message).unwrap());
assert!(verify_address(&kp1.address(), &s1, &message).unwrap());
assert!(verify_address(&kp2.address(), &s2, &message).unwrap());
let s1 = store.sign(&accounts[0], &"foo".into(), &message).unwrap();
let s2 = store.sign(&accounts[1], &"foo".into(), &message).unwrap();
assert!(verify_address(&accounts[0].address, &s1, &message).unwrap());
assert!(verify_address(&kp1.address(), &s1, &message).unwrap());
assert!(verify_address(&kp2.address(), &s2, &message).unwrap());
}

View File

@ -14,68 +14,69 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::path::PathBuf;
use std::{env, fs};
use rand::{Rng, OsRng};
use ethstore::accounts_dir::{KeyDirectory, RootDiskDirectory};
use ethstore::{Error, SafeAccount};
use ethstore::{
accounts_dir::{KeyDirectory, RootDiskDirectory},
Error, SafeAccount,
};
use rand::{OsRng, Rng};
use std::{env, fs, path::PathBuf};
pub fn random_dir() -> PathBuf {
let mut rng = OsRng::new().unwrap();
let mut dir = env::temp_dir();
dir.push(format!("{:x}-{:x}", rng.next_u64(), rng.next_u64()));
dir
let mut rng = OsRng::new().unwrap();
let mut dir = env::temp_dir();
dir.push(format!("{:x}-{:x}", rng.next_u64(), rng.next_u64()));
dir
}
pub struct TransientDir {
dir: RootDiskDirectory,
path: PathBuf,
dir: RootDiskDirectory,
path: PathBuf,
}
impl TransientDir {
pub fn create() -> Result<Self, Error> {
let path = random_dir();
let result = TransientDir {
dir: RootDiskDirectory::create(&path)?,
path: path,
};
pub fn create() -> Result<Self, Error> {
let path = random_dir();
let result = TransientDir {
dir: RootDiskDirectory::create(&path)?,
path: path,
};
Ok(result)
}
Ok(result)
}
pub fn open() -> Self {
let path = random_dir();
TransientDir {
dir: RootDiskDirectory::at(&path),
path: path,
}
}
pub fn open() -> Self {
let path = random_dir();
TransientDir {
dir: RootDiskDirectory::at(&path),
path: path,
}
}
}
impl Drop for TransientDir {
fn drop(&mut self) {
fs::remove_dir_all(&self.path).expect("Expected to remove temp dir");
}
fn drop(&mut self) {
fs::remove_dir_all(&self.path).expect("Expected to remove temp dir");
}
}
impl KeyDirectory for TransientDir {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
fn unique_repr(&self) -> Result<u64, Error> {
self.dir.unique_repr()
}
fn unique_repr(&self) -> Result<u64, Error> {
self.dir.unique_repr()
}
}


@@ -19,83 +19,89 @@
extern crate ethereum_types;
extern crate ethkey;
use std::fmt;
use ethereum_types::U256;
use ethkey::{Address, Signature};
use std::fmt;
pub struct WalletInfo {
pub address: Address,
pub name: String,
pub manufacturer: String,
pub address: Address,
pub name: String,
pub manufacturer: String,
}
#[derive(Debug)]
/// `ErrorType` for devices with no `hardware wallet`
pub enum Error {
NoWallet,
KeyNotFound,
NoWallet,
KeyNotFound,
}
pub struct TransactionInfo {
/// Nonce
pub nonce: U256,
/// Gas price
pub gas_price: U256,
/// Gas limit
pub gas_limit: U256,
/// Receiver
pub to: Option<Address>,
/// Value
pub value: U256,
/// Data
pub data: Vec<u8>,
/// Chain ID
pub chain_id: Option<u64>,
/// Nonce
pub nonce: U256,
/// Gas price
pub gas_price: U256,
/// Gas limit
pub gas_limit: U256,
/// Receiver
pub to: Option<Address>,
/// Value
pub value: U256,
/// Data
pub data: Vec<u8>,
/// Chain ID
pub chain_id: Option<u64>,
}
pub enum KeyPath {
/// Ethereum.
Ethereum,
/// Ethereum classic.
EthereumClassic,
/// Ethereum.
Ethereum,
/// Ethereum classic.
EthereumClassic,
}
/// `HardwareWalletManager` for devices with no `hardware wallet`
pub struct HardwareWalletManager;
impl HardwareWalletManager {
pub fn new() -> Result<Self, Error> {
Err(Error::NoWallet)
}
pub fn new() -> Result<Self, Error> {
Err(Error::NoWallet)
}
pub fn set_key_path(&self, _key_path: KeyPath) {}
pub fn set_key_path(&self, _key_path: KeyPath) {}
pub fn wallet_info(&self, _: &Address) -> Option<WalletInfo> {
None
}
pub fn wallet_info(&self, _: &Address) -> Option<WalletInfo> {
None
}
pub fn list_wallets(&self) -> Vec<WalletInfo> {
Vec::with_capacity(0)
}
pub fn list_wallets(&self) -> Vec<WalletInfo> {
Vec::with_capacity(0)
}
pub fn list_locked_wallets(&self) -> Result<Vec<String>, Error> {
Err(Error::NoWallet)
}
pub fn list_locked_wallets(&self) -> Result<Vec<String>, Error> {
Err(Error::NoWallet)
}
pub fn pin_matrix_ack(&self, _: &str, _: &str) -> Result<bool, Error> {
Err(Error::NoWallet)
}
pub fn pin_matrix_ack(&self, _: &str, _: &str) -> Result<bool, Error> {
Err(Error::NoWallet)
}
pub fn sign_transaction(&self, _address: &Address, _transaction: &TransactionInfo, _rlp_transaction: &[u8]) -> Result<Signature, Error> {
Err(Error::NoWallet) }
pub fn sign_transaction(
&self,
_address: &Address,
_transaction: &TransactionInfo,
_rlp_transaction: &[u8],
) -> Result<Signature, Error> {
Err(Error::NoWallet)
}
pub fn sign_message(&self, _address: &Address, _msg: &[u8]) -> Result<Signature, Error> {
Err(Error::NoWallet)
}
pub fn sign_message(&self, _address: &Address, _msg: &[u8]) -> Result<Signature, Error> {
Err(Error::NoWallet)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "No hardware wallet!!")
}
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "No hardware wallet!!")
}
}

File diff suppressed because one or more lines are too long


@@ -28,15 +28,20 @@ extern crate protobuf;
extern crate semver;
extern crate trezor_sys;
#[macro_use] extern crate log;
#[cfg(test)] extern crate rustc_hex;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate rustc_hex;
mod ledger;
mod trezor;
use std::sync::{Arc, atomic, atomic::AtomicBool, Weak};
use std::{fmt, time::Duration};
use std::thread;
use std::{
fmt,
sync::{atomic, atomic::AtomicBool, Arc, Weak},
thread,
time::Duration,
};
use ethereum_types::U256;
use ethkey::{Address, Signature};
@@ -50,291 +55,311 @@ const USB_EVENT_POLLING_INTERVAL: Duration = Duration::from_millis(500);
/// `HardwareWallet` device
#[derive(Debug)]
pub struct Device {
path: String,
info: WalletInfo,
path: String,
info: WalletInfo,
}
/// `Wallet` trait
pub trait Wallet<'a> {
/// Error
type Error;
/// Transaction data format
type Transaction;
/// Error
type Error;
/// Transaction data format
type Transaction;
/// Sign transaction data with wallet managing `address`.
fn sign_transaction(&self, address: &Address, transaction: Self::Transaction) -> Result<Signature, Self::Error>;
/// Sign transaction data with wallet managing `address`.
fn sign_transaction(
&self,
address: &Address,
transaction: Self::Transaction,
) -> Result<Signature, Self::Error>;
/// Set key derivation path for a chain.
fn set_key_path(&self, key_path: KeyPath);
/// Set key derivation path for a chain.
fn set_key_path(&self, key_path: KeyPath);
/// Re-populate device list
/// Note, this assumes all devices are iterated over and updated
fn update_devices(&self, device_direction: DeviceDirection) -> Result<usize, Self::Error>;
/// Re-populate device list
/// Note, this assumes all devices are iterated over and updated
fn update_devices(&self, device_direction: DeviceDirection) -> Result<usize, Self::Error>;
/// Read device info
fn read_device(&self, usb: &hidapi::HidApi, dev_info: &hidapi::HidDeviceInfo) -> Result<Device, Self::Error>;
/// Read device info
fn read_device(
&self,
usb: &hidapi::HidApi,
dev_info: &hidapi::HidDeviceInfo,
) -> Result<Device, Self::Error>;
/// List connected and acknowledged wallets
fn list_devices(&self) -> Vec<WalletInfo>;
/// List connected and acknowledged wallets
fn list_devices(&self) -> Vec<WalletInfo>;
/// List locked wallets
/// This may be moved if it is the wrong assumption; for example, this is not supported by Ledger,
/// in which case this method returns an empty vector
fn list_locked_devices(&self) -> Vec<String>;
/// List locked wallets
/// This may be moved if it is the wrong assumption; for example, this is not supported by Ledger,
/// in which case this method returns an empty vector
fn list_locked_devices(&self) -> Vec<String>;
/// Get wallet info.
fn get_wallet(&self, address: &Address) -> Option<WalletInfo>;
/// Get wallet info.
fn get_wallet(&self, address: &Address) -> Option<WalletInfo>;
/// Generate ethereum address for a Wallet
fn get_address(&self, device: &hidapi::HidDevice) -> Result<Option<Address>, Self::Error>;
/// Generate ethereum address for a Wallet
fn get_address(&self, device: &hidapi::HidDevice) -> Result<Option<Address>, Self::Error>;
/// Open a device using `device path`
/// Note: `f` is a closure that borrows a `HidResult<HidDevice>`;
/// `HidDevice` is in turn a type alias for a `c_void` function pointer
/// For further information see:
/// * <https://github.com/paritytech/hidapi-rs>
/// * <https://github.com/rust-lang/libc>
fn open_path<R, F>(&self, f: F) -> Result<R, Self::Error>
where F: Fn() -> Result<R, &'static str>;
/// Open a device using `device path`
/// Note: `f` is a closure that borrows a `HidResult<HidDevice>`;
/// `HidDevice` is in turn a type alias for a `c_void` function pointer
/// For further information see:
/// * <https://github.com/paritytech/hidapi-rs>
/// * <https://github.com/rust-lang/libc>
fn open_path<R, F>(&self, f: F) -> Result<R, Self::Error>
where
F: Fn() -> Result<R, &'static str>;
}
/// Hardware wallet error.
#[derive(Debug)]
pub enum Error {
/// Ledger device error.
LedgerDevice(ledger::Error),
/// Trezor device error
TrezorDevice(trezor::Error),
/// USB error.
Usb(libusb::Error),
/// HID error
Hid(String),
/// Hardware wallet not found for specified key.
KeyNotFound,
/// Ledger device error.
LedgerDevice(ledger::Error),
/// Trezor device error
TrezorDevice(trezor::Error),
/// USB error.
Usb(libusb::Error),
/// HID error
Hid(String),
/// Hardware wallet not found for specified key.
KeyNotFound,
}
/// This is the transaction info we need to supply to the Trezor message. It's more
/// or less a duplicate of `ethcore::transaction::Transaction`, but we can't
/// import ethcore here as that would be a circular dependency.
pub struct TransactionInfo {
/// Nonce
pub nonce: U256,
/// Gas price
pub gas_price: U256,
/// Gas limit
pub gas_limit: U256,
/// Receiver
pub to: Option<Address>,
/// Value
pub value: U256,
/// Data
pub data: Vec<u8>,
/// Chain ID
pub chain_id: Option<u64>,
/// Nonce
pub nonce: U256,
/// Gas price
pub gas_price: U256,
/// Gas limit
pub gas_limit: U256,
/// Receiver
pub to: Option<Address>,
/// Value
pub value: U256,
/// Data
pub data: Vec<u8>,
/// Chain ID
pub chain_id: Option<u64>,
}
/// Hardware wallet information.
#[derive(Debug, Clone)]
pub struct WalletInfo {
/// Wallet device name.
pub name: String,
/// Wallet device manufacturer.
pub manufacturer: String,
/// Wallet device serial number.
pub serial: String,
/// Ethereum address.
pub address: Address,
/// Wallet device name.
pub name: String,
/// Wallet device manufacturer.
pub manufacturer: String,
/// Wallet device serial number.
pub serial: String,
/// Ethereum address.
pub address: Address,
}
/// Key derivation paths used on hardware wallets.
#[derive(Debug, Clone, Copy)]
pub enum KeyPath {
/// Ethereum.
Ethereum,
/// Ethereum classic.
EthereumClassic,
/// Ethereum.
Ethereum,
/// Ethereum classic.
EthereumClassic,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::KeyNotFound => write!(f, "Key not found for given address."),
Error::LedgerDevice(ref e) => write!(f, "{}", e),
Error::TrezorDevice(ref e) => write!(f, "{}", e),
Error::Usb(ref e) => write!(f, "{}", e),
Error::Hid(ref e) => write!(f, "{}", e),
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::KeyNotFound => write!(f, "Key not found for given address."),
Error::LedgerDevice(ref e) => write!(f, "{}", e),
Error::TrezorDevice(ref e) => write!(f, "{}", e),
Error::Usb(ref e) => write!(f, "{}", e),
Error::Hid(ref e) => write!(f, "{}", e),
}
}
}
impl From<ledger::Error> for Error {
fn from(err: ledger::Error) -> Self {
match err {
ledger::Error::KeyNotFound => Error::KeyNotFound,
_ => Error::LedgerDevice(err),
}
}
fn from(err: ledger::Error) -> Self {
match err {
ledger::Error::KeyNotFound => Error::KeyNotFound,
_ => Error::LedgerDevice(err),
}
}
}
impl From<trezor::Error> for Error {
fn from(err: trezor::Error) -> Self {
match err {
trezor::Error::KeyNotFound => Error::KeyNotFound,
_ => Error::TrezorDevice(err),
}
}
fn from(err: trezor::Error) -> Self {
match err {
trezor::Error::KeyNotFound => Error::KeyNotFound,
_ => Error::TrezorDevice(err),
}
}
}
impl From<libusb::Error> for Error {
fn from(err: libusb::Error) -> Self {
Error::Usb(err)
}
fn from(err: libusb::Error) -> Self {
Error::Usb(err)
}
}
/// Specifies the direction of the `HardwareWallet`, i.e., whether it arrived or left
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum DeviceDirection {
/// Device arrived
Arrived,
/// Device left
Left,
/// Device arrived
Arrived,
/// Device left
Left,
}
impl fmt::Display for DeviceDirection {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DeviceDirection::Arrived => write!(f, "arrived"),
DeviceDirection::Left => write!(f, "left"),
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DeviceDirection::Arrived => write!(f, "arrived"),
DeviceDirection::Left => write!(f, "left"),
}
}
}
/// Hardware wallet management interface.
pub struct HardwareWalletManager {
exiting: Arc<AtomicBool>,
ledger: Arc<ledger::Manager>,
trezor: Arc<trezor::Manager>,
exiting: Arc<AtomicBool>,
ledger: Arc<ledger::Manager>,
trezor: Arc<trezor::Manager>,
}
impl HardwareWalletManager {
/// Hardware wallet constructor
pub fn new() -> Result<Self, Error> {
let exiting = Arc::new(AtomicBool::new(false));
let hidapi = Arc::new(Mutex::new(hidapi::HidApi::new().map_err(|e| Error::Hid(e.to_string().clone()))?));
let ledger = ledger::Manager::new(hidapi.clone());
let trezor = trezor::Manager::new(hidapi.clone());
let usb_context = Arc::new(libusb::Context::new()?);
/// Hardware wallet constructor
pub fn new() -> Result<Self, Error> {
let exiting = Arc::new(AtomicBool::new(false));
let hidapi = Arc::new(Mutex::new(
hidapi::HidApi::new().map_err(|e| Error::Hid(e.to_string().clone()))?,
));
let ledger = ledger::Manager::new(hidapi.clone());
let trezor = trezor::Manager::new(hidapi.clone());
let usb_context = Arc::new(libusb::Context::new()?);
let l = ledger.clone();
let t = trezor.clone();
let exit = exiting.clone();
let l = ledger.clone();
let t = trezor.clone();
let exit = exiting.clone();
// Subscribe to all vendor IDs (VIDs) and product IDs (PIDs)
// This means that the `HardwareWalletManager` is responsible for validating the detected device
usb_context.register_callback(
None, None, Some(HID_USB_DEVICE_CLASS),
Box::new(EventHandler::new(
Arc::downgrade(&ledger),
Arc::downgrade(&trezor)
))
)?;
// Subscribe to all vendor IDs (VIDs) and product IDs (PIDs)
// This means that the `HardwareWalletManager` is responsible for validating the detected device
usb_context.register_callback(
None,
None,
Some(HID_USB_DEVICE_CLASS),
Box::new(EventHandler::new(
Arc::downgrade(&ledger),
Arc::downgrade(&trezor),
)),
)?;
// Hardware event subscriber thread
thread::Builder::new()
.name("hw_wallet_manager".to_string())
.spawn(move || {
if let Err(e) = l.update_devices(DeviceDirection::Arrived) {
debug!(target: "hw", "Ledger couldn't connect at startup, error: {}", e);
}
if let Err(e) = t.update_devices(DeviceDirection::Arrived) {
debug!(target: "hw", "Trezor couldn't connect at startup, error: {}", e);
}
// Hardware event subscriber thread
thread::Builder::new()
.name("hw_wallet_manager".to_string())
.spawn(move || {
if let Err(e) = l.update_devices(DeviceDirection::Arrived) {
debug!(target: "hw", "Ledger couldn't connect at startup, error: {}", e);
}
if let Err(e) = t.update_devices(DeviceDirection::Arrived) {
debug!(target: "hw", "Trezor couldn't connect at startup, error: {}", e);
}
while !exit.load(atomic::Ordering::Acquire) {
if let Err(e) = usb_context.handle_events(Some(USB_EVENT_POLLING_INTERVAL)) {
debug!(target: "hw", "HardwareWalletManager event handler error: {}", e);
}
}
})
.ok();
while !exit.load(atomic::Ordering::Acquire) {
if let Err(e) = usb_context.handle_events(Some(USB_EVENT_POLLING_INTERVAL)) {
debug!(target: "hw", "HardwareWalletManager event handler error: {}", e);
}
}
})
.ok();
Ok(Self {
exiting,
trezor,
ledger,
})
}
Ok(Self {
exiting,
trezor,
ledger,
})
}
/// Select key derivation path for a chain.
/// Currently, only one hard-coded keypath is supported
/// It is managed by `ethcore/account_provider`
pub fn set_key_path(&self, key_path: KeyPath) {
self.ledger.set_key_path(key_path);
self.trezor.set_key_path(key_path);
}
/// Select key derivation path for a chain.
/// Currently, only one hard-coded keypath is supported
/// It is managed by `ethcore/account_provider`
pub fn set_key_path(&self, key_path: KeyPath) {
self.ledger.set_key_path(key_path);
self.trezor.set_key_path(key_path);
}
/// List connected wallets. This only returns wallets that are ready to be used.
pub fn list_wallets(&self) -> Vec<WalletInfo> {
let mut wallets = Vec::new();
wallets.extend(self.ledger.list_devices());
wallets.extend(self.trezor.list_devices());
wallets
}
/// List connected wallets. This only returns wallets that are ready to be used.
pub fn list_wallets(&self) -> Vec<WalletInfo> {
let mut wallets = Vec::new();
wallets.extend(self.ledger.list_devices());
wallets.extend(self.trezor.list_devices());
wallets
}
/// Return a list of paths to locked hardware wallets
/// This is only applicable to Trezor because Ledger only appears as
/// a device when it is unlocked
pub fn list_locked_wallets(&self) -> Result<Vec<String>, Error> {
Ok(self.trezor.list_locked_devices())
}
/// Return a list of paths to locked hardware wallets
/// This is only applicable to Trezor because Ledger only appears as
/// a device when it is unlocked
pub fn list_locked_wallets(&self) -> Result<Vec<String>, Error> {
Ok(self.trezor.list_locked_devices())
}
/// Get connected wallet info.
pub fn wallet_info(&self, address: &Address) -> Option<WalletInfo> {
if let Some(info) = self.ledger.get_wallet(address) {
Some(info)
} else {
self.trezor.get_wallet(address)
}
}
/// Get connected wallet info.
pub fn wallet_info(&self, address: &Address) -> Option<WalletInfo> {
if let Some(info) = self.ledger.get_wallet(address) {
Some(info)
} else {
self.trezor.get_wallet(address)
}
}
/// Sign a message with the wallet (only supported by Ledger)
pub fn sign_message(&self, address: &Address, msg: &[u8]) -> Result<Signature, Error> {
if self.ledger.get_wallet(address).is_some() {
Ok(self.ledger.sign_message(address, msg)?)
} else if self.trezor.get_wallet(address).is_some() {
Err(Error::TrezorDevice(trezor::Error::NoSigningMessage))
} else {
Err(Error::KeyNotFound)
}
}
/// Sign a message with the wallet (only supported by Ledger)
pub fn sign_message(&self, address: &Address, msg: &[u8]) -> Result<Signature, Error> {
if self.ledger.get_wallet(address).is_some() {
Ok(self.ledger.sign_message(address, msg)?)
} else if self.trezor.get_wallet(address).is_some() {
Err(Error::TrezorDevice(trezor::Error::NoSigningMessage))
} else {
Err(Error::KeyNotFound)
}
}
/// Sign transaction data with wallet managing `address`.
pub fn sign_transaction(&self, address: &Address, t_info: &TransactionInfo, encoded_transaction: &[u8]) -> Result<Signature, Error> {
if self.ledger.get_wallet(address).is_some() {
Ok(self.ledger.sign_transaction(address, encoded_transaction)?)
} else if self.trezor.get_wallet(address).is_some() {
Ok(self.trezor.sign_transaction(address, t_info)?)
} else {
Err(Error::KeyNotFound)
}
}
/// Sign transaction data with wallet managing `address`.
pub fn sign_transaction(
&self,
address: &Address,
t_info: &TransactionInfo,
encoded_transaction: &[u8],
) -> Result<Signature, Error> {
if self.ledger.get_wallet(address).is_some() {
Ok(self.ledger.sign_transaction(address, encoded_transaction)?)
} else if self.trezor.get_wallet(address).is_some() {
Ok(self.trezor.sign_transaction(address, t_info)?)
} else {
Err(Error::KeyNotFound)
}
}
/// Send a pin to a device at a certain path to unlock it
/// This is only applicable to Trezor because Ledger only appears as
/// a device when it is unlocked
pub fn pin_matrix_ack(&self, path: &str, pin: &str) -> Result<bool, Error> {
self.trezor.pin_matrix_ack(path, pin).map_err(Error::TrezorDevice)
}
/// Send a pin to a device at a certain path to unlock it
/// This is only applicable to Trezor because Ledger only appears as
/// a device when it is unlocked
pub fn pin_matrix_ack(&self, path: &str, pin: &str) -> Result<bool, Error> {
self.trezor
.pin_matrix_ack(path, pin)
.map_err(Error::TrezorDevice)
}
}
impl Drop for HardwareWalletManager {
fn drop(&mut self) {
// Indicate to the USB Hotplug handler that it
// shall terminate but don't wait for it to terminate.
// If it doesn't terminate for some reason USB Hotplug events will be handled
// even if the HardwareWalletManager has been dropped
self.exiting.store(true, atomic::Ordering::Release);
}
fn drop(&mut self) {
// Indicate to the USB Hotplug handler that it
// shall terminate but don't wait for it to terminate.
// If it doesn't terminate for some reason USB Hotplug events will be handled
// even if the HardwareWalletManager has been dropped
self.exiting.store(true, atomic::Ordering::Release);
}
}
/// Hardware wallet event handler
@@ -342,61 +367,77 @@ impl Drop for HardwareWalletManager {
/// Note that this runs to completion, and race conditions can't occur, but it can
/// block other events from being processed with an infinite loop or similar
struct EventHandler {
ledger: Weak<ledger::Manager>,
trezor: Weak<trezor::Manager>,
ledger: Weak<ledger::Manager>,
trezor: Weak<trezor::Manager>,
}
impl EventHandler {
/// Trezor event handler constructor
pub fn new(ledger: Weak<ledger::Manager>, trezor: Weak<trezor::Manager>) -> Self {
Self { ledger, trezor }
}
/// Trezor event handler constructor
pub fn new(ledger: Weak<ledger::Manager>, trezor: Weak<trezor::Manager>) -> Self {
Self { ledger, trezor }
}
fn extract_device_info(device: &libusb::Device) -> Result<(u16, u16), Error> {
let desc = device.device_descriptor()?;
Ok((desc.vendor_id(), desc.product_id()))
}
fn extract_device_info(device: &libusb::Device) -> Result<(u16, u16), Error> {
let desc = device.device_descriptor()?;
Ok((desc.vendor_id(), desc.product_id()))
}
}
impl libusb::Hotplug for EventHandler {
fn device_arrived(&mut self, device: libusb::Device) {
// Upgrade reference to an Arc
if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) {
// Vendor ID and Product ID are available
if let Ok((vid, pid)) = Self::extract_device_info(&device) {
if trezor::is_valid_trezor(vid, pid) {
if !trezor::try_connect_polling(&trezor, &MAX_POLLING_DURATION, DeviceDirection::Arrived) {
trace!(target: "hw", "Trezor device was detected but connection failed");
}
} else if ledger::is_valid_ledger(vid, pid) {
if !ledger::try_connect_polling(&ledger, &MAX_POLLING_DURATION, DeviceDirection::Arrived) {
trace!(target: "hw", "Ledger device was detected but connection failed");
}
}
}
}
}
fn device_arrived(&mut self, device: libusb::Device) {
// Upgrade reference to an Arc
if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) {
// Vendor ID and Product ID are available
if let Ok((vid, pid)) = Self::extract_device_info(&device) {
if trezor::is_valid_trezor(vid, pid) {
if !trezor::try_connect_polling(
&trezor,
&MAX_POLLING_DURATION,
DeviceDirection::Arrived,
) {
trace!(target: "hw", "Trezor device was detected but connection failed");
}
} else if ledger::is_valid_ledger(vid, pid) {
if !ledger::try_connect_polling(
&ledger,
&MAX_POLLING_DURATION,
DeviceDirection::Arrived,
) {
trace!(target: "hw", "Ledger device was detected but connection failed");
}
}
}
}
}
fn device_left(&mut self, device: libusb::Device) {
// Upgrade reference to an Arc
if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) {
// Vendor ID and Product ID are available
if let Ok((vid, pid)) = Self::extract_device_info(&device) {
if trezor::is_valid_trezor(vid, pid) {
if !trezor::try_connect_polling(&trezor, &MAX_POLLING_DURATION, DeviceDirection::Left) {
trace!(target: "hw", "Trezor device was detected but disconnection failed");
}
} else if ledger::is_valid_ledger(vid, pid) {
if !ledger::try_connect_polling(&ledger, &MAX_POLLING_DURATION, DeviceDirection::Left) {
trace!(target: "hw", "Ledger device was detected but disconnection failed");
}
}
}
}
}
fn device_left(&mut self, device: libusb::Device) {
// Upgrade reference to an Arc
if let (Some(ledger), Some(trezor)) = (self.ledger.upgrade(), self.trezor.upgrade()) {
// Vendor ID and Product ID are available
if let Ok((vid, pid)) = Self::extract_device_info(&device) {
if trezor::is_valid_trezor(vid, pid) {
if !trezor::try_connect_polling(
&trezor,
&MAX_POLLING_DURATION,
DeviceDirection::Left,
) {
trace!(target: "hw", "Trezor device was detected but disconnection failed");
}
} else if ledger::is_valid_ledger(vid, pid) {
if !ledger::try_connect_polling(
&ledger,
&MAX_POLLING_DURATION,
DeviceDirection::Left,
) {
trace!(target: "hw", "Ledger device was detected but disconnection failed");
}
}
}
}
}
}
/// Helper to determine if a device is a valid HID
pub fn is_valid_hid_device(usage_page: u16, interface_number: i32) -> bool {
usage_page == HID_GLOBAL_USAGE_PAGE || interface_number == HID_USB_DEVICE_CLASS as i32
usage_page == HID_GLOBAL_USAGE_PAGE || interface_number == HID_USB_DEVICE_CLASS as i32
}


@@ -19,19 +19,26 @@
//! and <https://github.com/trezor/trezor-common/blob/master/protob/protocol.md>
//! for protocol details.
use std::cmp::{min, max};
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::fmt;
use std::{
cmp::{max, min},
fmt,
sync::Arc,
time::{Duration, Instant},
};
use ethereum_types::{U256, H256, Address};
use super::{
is_valid_hid_device, Device, DeviceDirection, KeyPath, TransactionInfo, Wallet, WalletInfo,
};
use ethereum_types::{Address, H256, U256};
use ethkey::Signature;
use hidapi;
use libusb;
use parking_lot::{Mutex, RwLock};
use protobuf::{self, Message, ProtobufEnum};
use super::{DeviceDirection, WalletInfo, TransactionInfo, KeyPath, Wallet, Device, is_valid_hid_device};
use trezor_sys::messages::{EthereumAddress, PinMatrixAck, MessageType, EthereumTxRequest, EthereumSignTx, EthereumGetAddress, EthereumTxAck, ButtonAck};
use trezor_sys::messages::{
ButtonAck, EthereumAddress, EthereumGetAddress, EthereumSignTx, EthereumTxAck,
EthereumTxRequest, MessageType, PinMatrixAck,
};
/// Trezor v1 vendor ID
const TREZOR_VID: u16 = 0x534c;
@@ -44,388 +51,450 @@ const ETC_DERIVATION_PATH: [u32; 5] = [0x8000_002C, 0x8000_003D, 0x8000_0000, 0,
/// Hardware wallet error.
#[derive(Debug)]
pub enum Error {
/// Ethereum wallet protocol error.
Protocol(&'static str),
/// Hidapi error.
Usb(hidapi::HidError),
/// Libusb error
LibUsb(libusb::Error),
/// Device with request key is not available.
KeyNotFound,
/// Signing has been cancelled by user.
UserCancel,
/// The Message Type given in the trezor RPC call is not something we recognize
BadMessageType,
/// Trying to read from a closed device at the given path
LockedDevice(String),
/// Signing messages are not supported by Trezor
NoSigningMessage,
/// No device arrived
NoDeviceArrived,
/// No device left
NoDeviceLeft,
/// Invalid PID or VID
InvalidDevice,
/// Ethereum wallet protocol error.
Protocol(&'static str),
/// Hidapi error.
Usb(hidapi::HidError),
/// Libusb error
LibUsb(libusb::Error),
/// Device with request key is not available.
KeyNotFound,
/// Signing has been cancelled by user.
UserCancel,
/// The Message Type given in the trezor RPC call is not something we recognize
BadMessageType,
/// Trying to read from a closed device at the given path
LockedDevice(String),
/// Signing messages are not supported by Trezor
NoSigningMessage,
/// No device arrived
NoDeviceArrived,
/// No device left
NoDeviceLeft,
/// Invalid PID or VID
InvalidDevice,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::Protocol(ref s) => write!(f, "Trezor protocol error: {}", s),
Error::Usb(ref e) => write!(f, "USB communication error: {}", e),
Error::LibUsb(ref e) => write!(f, "LibUSB communication error: {}", e),
Error::KeyNotFound => write!(f, "Key not found"),
Error::UserCancel => write!(f, "Operation has been cancelled"),
Error::BadMessageType => write!(f, "Bad Message Type in RPC call"),
Error::LockedDevice(ref s) => write!(f, "Device is locked, needs PIN to perform operations: {}", s),
Error::NoSigningMessage=> write!(f, "Signing messages are not supported by Trezor"),
Error::NoDeviceArrived => write!(f, "No device arrived"),
Error::NoDeviceLeft => write!(f, "No device left"),
Error::InvalidDevice => write!(f, "Device with non-supported product ID or vendor ID was detected"),
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::Protocol(ref s) => write!(f, "Trezor protocol error: {}", s),
Error::Usb(ref e) => write!(f, "USB communication error: {}", e),
Error::LibUsb(ref e) => write!(f, "LibUSB communication error: {}", e),
Error::KeyNotFound => write!(f, "Key not found"),
Error::UserCancel => write!(f, "Operation has been cancelled"),
Error::BadMessageType => write!(f, "Bad Message Type in RPC call"),
Error::LockedDevice(ref s) => write!(
f,
"Device is locked, needs PIN to perform operations: {}",
s
),
Error::NoSigningMessage => write!(f, "Signing messages are not supported by Trezor"),
Error::NoDeviceArrived => write!(f, "No device arrived"),
Error::NoDeviceLeft => write!(f, "No device left"),
Error::InvalidDevice => write!(
f,
"Device with non-supported product ID or vendor ID was detected"
),
}
}
}
impl From<hidapi::HidError> for Error {
fn from(err: hidapi::HidError) -> Self {
Error::Usb(err)
}
fn from(err: hidapi::HidError) -> Self {
Error::Usb(err)
}
}
impl From<libusb::Error> for Error {
fn from(err: libusb::Error) -> Self {
Error::LibUsb(err)
}
fn from(err: libusb::Error) -> Self {
Error::LibUsb(err)
}
}
impl From<protobuf::ProtobufError> for Error {
fn from(_: protobuf::ProtobufError) -> Self {
Error::Protocol(&"Could not read response from Trezor Device")
}
fn from(_: protobuf::ProtobufError) -> Self {
Error::Protocol(&"Could not read response from Trezor Device")
}
}
/// Trezor device manager
pub struct Manager {
usb: Arc<Mutex<hidapi::HidApi>>,
devices: RwLock<Vec<Device>>,
locked_devices: RwLock<Vec<String>>,
key_path: RwLock<KeyPath>,
usb: Arc<Mutex<hidapi::HidApi>>,
devices: RwLock<Vec<Device>>,
locked_devices: RwLock<Vec<String>>,
key_path: RwLock<KeyPath>,
}
/// HID Version used for the Trezor device
enum HidVersion {
V1,
V2,
V1,
V2,
}
impl Manager {
/// Create a new instance.
pub fn new(usb: Arc<Mutex<hidapi::HidApi>>) -> Arc<Self> {
Arc::new(Self {
usb,
devices: RwLock::new(Vec::new()),
locked_devices: RwLock::new(Vec::new()),
key_path: RwLock::new(KeyPath::Ethereum),
})
}
/// Create a new instance.
pub fn new(usb: Arc<Mutex<hidapi::HidApi>>) -> Arc<Self> {
Arc::new(Self {
usb,
devices: RwLock::new(Vec::new()),
locked_devices: RwLock::new(Vec::new()),
key_path: RwLock::new(KeyPath::Ethereum),
})
}
pub fn pin_matrix_ack(&self, device_path: &str, pin: &str) -> Result<bool, Error> {
let unlocked = {
let usb = self.usb.lock();
let device = self.open_path(|| usb.open_path(&device_path))?;
let t = MessageType::MessageType_PinMatrixAck;
let mut m = PinMatrixAck::new();
m.set_pin(pin.to_string());
self.send_device_message(&device, t, &m)?;
let (resp_type, _) = self.read_device_response(&device)?;
match resp_type {
// Getting an Address back means it's unlocked; this is undocumented behavior
MessageType::MessageType_EthereumAddress => Ok(true),
// Getting anything else means we didn't unlock it
_ => Ok(false),
pub fn pin_matrix_ack(&self, device_path: &str, pin: &str) -> Result<bool, Error> {
let unlocked = {
let usb = self.usb.lock();
let device = self.open_path(|| usb.open_path(&device_path))?;
let t = MessageType::MessageType_PinMatrixAck;
let mut m = PinMatrixAck::new();
m.set_pin(pin.to_string());
self.send_device_message(&device, t, &m)?;
let (resp_type, _) = self.read_device_response(&device)?;
match resp_type {
// Getting an Address back means it's unlocked; this is undocumented behavior
MessageType::MessageType_EthereumAddress => Ok(true),
// Getting anything else means we didn't unlock it
_ => Ok(false),
}
};
self.update_devices(DeviceDirection::Arrived)?;
unlocked
}
}
};
self.update_devices(DeviceDirection::Arrived)?;
unlocked
}
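// Serializes a U256 as minimal big-endian bytes: leading zero bytes are
// stripped, so e.g. 256 becomes [0x01, 0x00] and zero becomes an empty vector.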
fn u256_to_be_vec(&self, val: &U256) -> Vec<u8> {
let mut buf = [0_u8; 32];
val.to_big_endian(&mut buf);
buf.iter().skip_while(|x| **x == 0).cloned().collect()
}
fn u256_to_be_vec(&self, val: &U256) -> Vec<u8> {
let mut buf = [0_u8; 32];
val.to_big_endian(&mut buf);
buf.iter().skip_while(|x| **x == 0).cloned().collect()
}
fn signing_loop(
&self,
handle: &hidapi::HidDevice,
chain_id: &Option<u64>,
data: &[u8],
) -> Result<Signature, Error> {
let (resp_type, bytes) = self.read_device_response(&handle)?;
match resp_type {
MessageType::MessageType_Cancel => Err(Error::UserCancel),
MessageType::MessageType_ButtonRequest => {
self.send_device_message(
handle,
MessageType::MessageType_ButtonAck,
&ButtonAck::new(),
)?;
// Signing loop goes back to the top and reading blocks
// for up to 5 minutes waiting for response from the device
// if the user doesn't click any button within 5 minutes you
// get a signing error and the device sort of locks up on the signing screen
self.signing_loop(handle, chain_id, data)
}
MessageType::MessageType_EthereumTxRequest => {
let resp: EthereumTxRequest = protobuf::core::parse_from_bytes(&bytes)?;
if resp.has_data_length() {
let mut msg = EthereumTxAck::new();
let len = resp.get_data_length() as usize;
msg.set_data_chunk(data[..len].to_vec());
self.send_device_message(handle, MessageType::MessageType_EthereumTxAck, &msg)?;
self.signing_loop(handle, chain_id, &data[len..])
} else {
let v = resp.get_signature_v();
let r = H256::from_slice(resp.get_signature_r());
let s = H256::from_slice(resp.get_signature_s());
if let Some(c_id) = *chain_id {
// If there is a chain_id supplied, Trezor will return a v
// part of the signature that is already adjusted for EIP-155,
// so v' = v + 2 * chain_id + 35, but code further down the
// pipeline will already do this transformation, so remove it here
let adjustment = 35 + 2 * c_id as u32;
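// e.g. for chain_id = 1: adjustment = 37, so a device-reported v of 37 or 38
// maps back to a raw recovery id of 0 or 1 (max() guards against underflow).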
Ok(Signature::from_rsv(
&r,
&s,
(max(v, adjustment) - adjustment) as u8,
))
} else {
// If there isn't a chain_id, v will be returned as v + 27
let adjusted_v = if v < 27 { v } else { v - 27 };
Ok(Signature::from_rsv(&r, &s, adjusted_v as u8))
}
}
}
MessageType::MessageType_Failure => {
Err(Error::Protocol("Last message sent to Trezor failed"))
}
_ => Err(Error::Protocol("Unexpected response from Trezor device.")),
}
}
fn signing_loop(&self, handle: &hidapi::HidDevice, chain_id: &Option<u64>, data: &[u8]) -> Result<Signature, Error> {
let (resp_type, bytes) = self.read_device_response(&handle)?;
match resp_type {
MessageType::MessageType_Cancel => Err(Error::UserCancel),
MessageType::MessageType_ButtonRequest => {
self.send_device_message(handle, MessageType::MessageType_ButtonAck, &ButtonAck::new())?;
// Signing loop goes back to the top and reading blocks
// for up to 5 minutes waiting for response from the device
// if the user doesn't click any button within 5 minutes you
// get a signing error and the device sort of locks up on the signing screen
self.signing_loop(handle, chain_id, data)
}
MessageType::MessageType_EthereumTxRequest => {
let resp: EthereumTxRequest = protobuf::core::parse_from_bytes(&bytes)?;
if resp.has_data_length() {
let mut msg = EthereumTxAck::new();
let len = resp.get_data_length() as usize;
msg.set_data_chunk(data[..len].to_vec());
self.send_device_message(handle, MessageType::MessageType_EthereumTxAck, &msg)?;
self.signing_loop(handle, chain_id, &data[len..])
} else {
let v = resp.get_signature_v();
let r = H256::from_slice(resp.get_signature_r());
let s = H256::from_slice(resp.get_signature_s());
if let Some(c_id) = *chain_id {
// If there is a chain_id supplied, Trezor will return a v
// part of the signature that is already adjusted for EIP-155,
// so v' = v + 2 * chain_id + 35, but code further down the
// pipeline will already do this transformation, so remove it here
let adjustment = 35 + 2 * c_id as u32;
Ok(Signature::from_rsv(&r, &s, (max(v, adjustment) - adjustment) as u8))
} else {
// If there isn't a chain_id, v will be returned as v + 27
let adjusted_v = if v < 27 { v } else { v - 27 };
Ok(Signature::from_rsv(&r, &s, adjusted_v as u8))
}
}
}
MessageType::MessageType_Failure => Err(Error::Protocol("Last message sent to Trezor failed")),
_ => Err(Error::Protocol("Unexpected response from Trezor device.")),
}
}
fn send_device_message(
&self,
device: &hidapi::HidDevice,
msg_type: MessageType,
msg: &Message,
) -> Result<usize, Error> {
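// Frame layout assembled below: the magic bytes b"##", the message type as a
// big-endian u16 and the payload length as a big-endian u32, followed by the
// protobuf payload, zero-padded to a multiple of 63 bytes and written in
// 63-byte chunks that are each prefixed with b'?' (plus a leading 0x00 report
// id when the device speaks HID version 2).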
let msg_id = msg_type as u16;
let mut message = msg.write_to_bytes()?;
let msg_size = message.len();
let mut data = Vec::new();
let hid_version = self.probe_hid_version(device)?;
// Magic constants
data.push(b'#');
data.push(b'#');
// Convert msg_id to BE and split into bytes
data.push(((msg_id >> 8) & 0xFF) as u8);
data.push((msg_id & 0xFF) as u8);
// Convert msg_size to BE and split into bytes
data.push(((msg_size >> 24) & 0xFF) as u8);
data.push(((msg_size >> 16) & 0xFF) as u8);
data.push(((msg_size >> 8) & 0xFF) as u8);
data.push((msg_size & 0xFF) as u8);
data.append(&mut message);
while data.len() % 63 > 0 {
data.push(0);
}
let mut total_written = 0;
for chunk in data.chunks(63) {
let mut padded_chunk = match hid_version {
HidVersion::V1 => vec![b'?'],
HidVersion::V2 => vec![0, b'?'],
};
padded_chunk.extend_from_slice(&chunk);
total_written += device.write(&padded_chunk)?;
}
Ok(total_written)
}
fn send_device_message(&self, device: &hidapi::HidDevice, msg_type: MessageType, msg: &Message) -> Result<usize, Error> {
let msg_id = msg_type as u16;
let mut message = msg.write_to_bytes()?;
let msg_size = message.len();
let mut data = Vec::new();
let hid_version = self.probe_hid_version(device)?;
// Magic constants
data.push(b'#');
data.push(b'#');
// Convert msg_id to BE and split into bytes
data.push(((msg_id >> 8) & 0xFF) as u8);
data.push((msg_id & 0xFF) as u8);
// Convert msg_size to BE and split into bytes
data.push(((msg_size >> 24) & 0xFF) as u8);
data.push(((msg_size >> 16) & 0xFF) as u8);
data.push(((msg_size >> 8) & 0xFF) as u8);
data.push((msg_size & 0xFF) as u8);
data.append(&mut message);
while data.len() % 63 > 0 {
data.push(0);
}
let mut total_written = 0;
for chunk in data.chunks(63) {
let mut padded_chunk = match hid_version {
HidVersion::V1 => vec![b'?'],
HidVersion::V2 => vec![0, b'?'],
};
padded_chunk.extend_from_slice(&chunk);
total_written += device.write(&padded_chunk)?;
}
Ok(total_written)
}
fn probe_hid_version(&self, device: &hidapi::HidDevice) -> Result<HidVersion, Error> {
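// Probe by write size: if the device accepts a full 65-byte report (with a
// leading 0x00 report id) it uses the newer framing (V2); otherwise fall back
// to a plain 64-byte report (V1).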
let mut buf2 = [0xFF_u8; 65];
buf2[0] = 0;
buf2[1] = 63;
let mut buf1 = [0xFF_u8; 64];
buf1[0] = 63;
if device.write(&buf2)? == 65 {
Ok(HidVersion::V2)
} else if device.write(&buf1)? == 64 {
Ok(HidVersion::V1)
} else {
Err(Error::Usb("Unable to determine HID Version"))
}
}
fn probe_hid_version(&self, device: &hidapi::HidDevice) -> Result<HidVersion, Error> {
let mut buf2 = [0xFF_u8; 65];
buf2[0] = 0;
buf2[1] = 63;
let mut buf1 = [0xFF_u8; 64];
buf1[0] = 63;
if device.write(&buf2)? == 65 {
Ok(HidVersion::V2)
} else if device.write(&buf1)? == 64 {
Ok(HidVersion::V1)
} else {
Err(Error::Usb("Unable to determine HID Version"))
}
}
fn read_device_response(
&self,
device: &hidapi::HidDevice,
) -> Result<(MessageType, Vec<u8>), Error> {
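// The first 64-byte report must start with b"?##"; bytes 3..5 carry the message
// type (big-endian u16) and bytes 5..9 the payload length (big-endian u32).
// Follow-up reports contribute their bytes from index 1 onwards until the whole
// payload has been read.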
let protocol_err = Error::Protocol(&"Unexpected wire response from Trezor Device");
let mut buf = vec![0; 64];
fn read_device_response(&self, device: &hidapi::HidDevice) -> Result<(MessageType, Vec<u8>), Error> {
let protocol_err = Error::Protocol(&"Unexpected wire response from Trezor Device");
let mut buf = vec![0; 64];
let first_chunk = device.read_timeout(&mut buf, 300_000)?;
if first_chunk < 9 || buf[0] != b'?' || buf[1] != b'#' || buf[2] != b'#' {
return Err(protocol_err);
}
let msg_type = MessageType::from_i32(((buf[3] as i32 & 0xFF) << 8) + (buf[4] as i32 & 0xFF)).ok_or(protocol_err)?;
let msg_size = ((buf[5] as u32 & 0xFF) << 24) + ((buf[6] as u32 & 0xFF) << 16) + ((buf[7] as u32 & 0xFF) << 8) + (buf[8] as u32 & 0xFF);
let mut data = Vec::new();
data.extend_from_slice(&buf[9..]);
while data.len() < (msg_size as usize) {
device.read_timeout(&mut buf, 10_000)?;
data.extend_from_slice(&buf[1..]);
}
Ok((msg_type, data[..msg_size as usize].to_vec()))
}
let first_chunk = device.read_timeout(&mut buf, 300_000)?;
if first_chunk < 9 || buf[0] != b'?' || buf[1] != b'#' || buf[2] != b'#' {
return Err(protocol_err);
}
let msg_type =
MessageType::from_i32(((buf[3] as i32 & 0xFF) << 8) + (buf[4] as i32 & 0xFF))
.ok_or(protocol_err)?;
let msg_size = ((buf[5] as u32 & 0xFF) << 24)
+ ((buf[6] as u32 & 0xFF) << 16)
+ ((buf[7] as u32 & 0xFF) << 8)
+ (buf[8] as u32 & 0xFF);
let mut data = Vec::new();
data.extend_from_slice(&buf[9..]);
while data.len() < (msg_size as usize) {
device.read_timeout(&mut buf, 10_000)?;
data.extend_from_slice(&buf[1..]);
}
Ok((msg_type, data[..msg_size as usize].to_vec()))
}
}
impl<'a> Wallet<'a> for Manager {
type Error = Error;
type Transaction = &'a TransactionInfo;
type Error = Error;
type Transaction = &'a TransactionInfo;
fn sign_transaction(&self, address: &Address, t_info: Self::Transaction) ->
Result<Signature, Error> {
let usb = self.usb.lock();
let devices = self.devices.read();
let device = devices.iter().find(|d| &d.info.address == address).ok_or(Error::KeyNotFound)?;
let handle = self.open_path(|| usb.open_path(&device.path))?;
let msg_type = MessageType::MessageType_EthereumSignTx;
let mut message = EthereumSignTx::new();
match *self.key_path.read() {
KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()),
KeyPath::EthereumClassic => message.set_address_n(ETC_DERIVATION_PATH.to_vec()),
}
message.set_nonce(self.u256_to_be_vec(&t_info.nonce));
message.set_gas_limit(self.u256_to_be_vec(&t_info.gas_limit));
message.set_gas_price(self.u256_to_be_vec(&t_info.gas_price));
message.set_value(self.u256_to_be_vec(&t_info.value));
fn sign_transaction(
&self,
address: &Address,
t_info: Self::Transaction,
) -> Result<Signature, Error> {
let usb = self.usb.lock();
let devices = self.devices.read();
let device = devices
.iter()
.find(|d| &d.info.address == address)
.ok_or(Error::KeyNotFound)?;
let handle = self.open_path(|| usb.open_path(&device.path))?;
let msg_type = MessageType::MessageType_EthereumSignTx;
let mut message = EthereumSignTx::new();
match *self.key_path.read() {
KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()),
KeyPath::EthereumClassic => message.set_address_n(ETC_DERIVATION_PATH.to_vec()),
}
message.set_nonce(self.u256_to_be_vec(&t_info.nonce));
message.set_gas_limit(self.u256_to_be_vec(&t_info.gas_limit));
message.set_gas_price(self.u256_to_be_vec(&t_info.gas_price));
message.set_value(self.u256_to_be_vec(&t_info.value));
if let Some(addr) = t_info.to {
message.set_to(addr.to_vec())
}
let first_chunk_length = min(t_info.data.len(), 1024);
let chunk = &t_info.data[0..first_chunk_length];
message.set_data_initial_chunk(chunk.to_vec());
message.set_data_length(t_info.data.len() as u32);
if let Some(c_id) = t_info.chain_id {
message.set_chain_id(c_id as u32);
}
if let Some(addr) = t_info.to {
message.set_to(addr.to_vec())
}
let first_chunk_length = min(t_info.data.len(), 1024);
let chunk = &t_info.data[0..first_chunk_length];
message.set_data_initial_chunk(chunk.to_vec());
message.set_data_length(t_info.data.len() as u32);
if let Some(c_id) = t_info.chain_id {
message.set_chain_id(c_id as u32);
}
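// Only the first chunk (at most 1024 bytes) of the call data goes into the
// initial message; signing_loop streams the remainder in whatever chunk sizes
// the device requests via EthereumTxRequest's data_length.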
self.send_device_message(&handle, msg_type, &message)?;
self.send_device_message(&handle, msg_type, &message)?;
self.signing_loop(&handle, &t_info.chain_id, &t_info.data[first_chunk_length..])
}
self.signing_loop(
&handle,
&t_info.chain_id,
&t_info.data[first_chunk_length..],
)
}
fn set_key_path(&self, key_path: KeyPath) {
*self.key_path.write() = key_path;
}
fn set_key_path(&self, key_path: KeyPath) {
*self.key_path.write() = key_path;
}
fn update_devices(&self, device_direction: DeviceDirection) -> Result<usize, Error> {
let mut usb = self.usb.lock();
usb.refresh_devices();
let devices = usb.devices();
let num_prev_devices = self.devices.read().len();
fn update_devices(&self, device_direction: DeviceDirection) -> Result<usize, Error> {
let mut usb = self.usb.lock();
usb.refresh_devices();
let devices = usb.devices();
let num_prev_devices = self.devices.read().len();
let detected_devices = devices.iter()
.filter(|&d| is_valid_trezor(d.vendor_id, d.product_id) &&
is_valid_hid_device(d.usage_page, d.interface_number)
)
.fold(Vec::new(), |mut v, d| {
match self.read_device(&usb, &d) {
Ok(info) => {
trace!(target: "hw", "Found device: {:?}", info);
v.push(info);
}
Err(e) => trace!(target: "hw", "Error reading device info: {}", e),
};
v
});
let detected_devices = devices
.iter()
.filter(|&d| {
is_valid_trezor(d.vendor_id, d.product_id)
&& is_valid_hid_device(d.usage_page, d.interface_number)
})
.fold(Vec::new(), |mut v, d| {
match self.read_device(&usb, &d) {
Ok(info) => {
trace!(target: "hw", "Found device: {:?}", info);
v.push(info);
}
Err(e) => trace!(target: "hw", "Error reading device info: {}", e),
};
v
});
let num_curr_devices = detected_devices.len();
*self.devices.write() = detected_devices;
let num_curr_devices = detected_devices.len();
*self.devices.write() = detected_devices;
match device_direction {
DeviceDirection::Arrived => {
if num_curr_devices > num_prev_devices {
Ok(num_curr_devices - num_prev_devices)
} else {
Err(Error::NoDeviceArrived)
}
}
DeviceDirection::Left => {
if num_prev_devices > num_curr_devices {
Ok(num_prev_devices - num_curr_devices)
} else {
Err(Error::NoDeviceLeft)
}
}
}
}
match device_direction {
DeviceDirection::Arrived => {
if num_curr_devices > num_prev_devices {
Ok(num_curr_devices - num_prev_devices)
} else {
Err(Error::NoDeviceArrived)
}
}
DeviceDirection::Left => {
if num_prev_devices > num_curr_devices {
Ok(num_prev_devices - num_curr_devices)
} else {
Err(Error::NoDeviceLeft)
}
}
}
}
fn read_device(&self, usb: &hidapi::HidApi, dev_info: &hidapi::HidDeviceInfo) -> Result<Device, Error> {
let handle = self.open_path(|| usb.open_path(&dev_info.path))?;
let manufacturer = dev_info.manufacturer_string.clone().unwrap_or_else(|| "Unknown".to_owned());
let name = dev_info.product_string.clone().unwrap_or_else(|| "Unknown".to_owned());
let serial = dev_info.serial_number.clone().unwrap_or_else(|| "Unknown".to_owned());
match self.get_address(&handle) {
Ok(Some(addr)) => {
Ok(Device {
path: dev_info.path.clone(),
info: WalletInfo {
name,
manufacturer,
serial,
address: addr,
},
})
}
Ok(None) => Err(Error::LockedDevice(dev_info.path.clone())),
Err(e) => Err(e),
}
}
fn read_device(
&self,
usb: &hidapi::HidApi,
dev_info: &hidapi::HidDeviceInfo,
) -> Result<Device, Error> {
let handle = self.open_path(|| usb.open_path(&dev_info.path))?;
let manufacturer = dev_info
.manufacturer_string
.clone()
.unwrap_or_else(|| "Unknown".to_owned());
let name = dev_info
.product_string
.clone()
.unwrap_or_else(|| "Unknown".to_owned());
let serial = dev_info
.serial_number
.clone()
.unwrap_or_else(|| "Unknown".to_owned());
match self.get_address(&handle) {
Ok(Some(addr)) => Ok(Device {
path: dev_info.path.clone(),
info: WalletInfo {
name,
manufacturer,
serial,
address: addr,
},
}),
Ok(None) => Err(Error::LockedDevice(dev_info.path.clone())),
Err(e) => Err(e),
}
}
fn list_devices(&self) -> Vec<WalletInfo> {
self.devices.read().iter().map(|d| d.info.clone()).collect()
}
fn list_devices(&self) -> Vec<WalletInfo> {
self.devices.read().iter().map(|d| d.info.clone()).collect()
}
fn list_locked_devices(&self) -> Vec<String> {
(*self.locked_devices.read()).clone()
}
fn list_locked_devices(&self) -> Vec<String> {
(*self.locked_devices.read()).clone()
}
fn get_wallet(&self, address: &Address) -> Option<WalletInfo> {
self.devices.read().iter().find(|d| &d.info.address == address).map(|d| d.info.clone())
}
fn get_wallet(&self, address: &Address) -> Option<WalletInfo> {
self.devices
.read()
.iter()
.find(|d| &d.info.address == address)
.map(|d| d.info.clone())
}
fn get_address(&self, device: &hidapi::HidDevice) -> Result<Option<Address>, Error> {
let typ = MessageType::MessageType_EthereumGetAddress;
let mut message = EthereumGetAddress::new();
match *self.key_path.read() {
KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()),
KeyPath::EthereumClassic => message.set_address_n(ETC_DERIVATION_PATH.to_vec()),
}
message.set_show_display(false);
self.send_device_message(&device, typ, &message)?;
fn get_address(&self, device: &hidapi::HidDevice) -> Result<Option<Address>, Error> {
let typ = MessageType::MessageType_EthereumGetAddress;
let mut message = EthereumGetAddress::new();
match *self.key_path.read() {
KeyPath::Ethereum => message.set_address_n(ETH_DERIVATION_PATH.to_vec()),
KeyPath::EthereumClassic => message.set_address_n(ETC_DERIVATION_PATH.to_vec()),
}
message.set_show_display(false);
self.send_device_message(&device, typ, &message)?;
let (resp_type, bytes) = self.read_device_response(&device)?;
match resp_type {
MessageType::MessageType_EthereumAddress => {
let response: EthereumAddress = protobuf::core::parse_from_bytes(&bytes)?;
Ok(Some(From::from(response.get_address())))
}
_ => Ok(None),
}
}
let (resp_type, bytes) = self.read_device_response(&device)?;
match resp_type {
MessageType::MessageType_EthereumAddress => {
let response: EthereumAddress = protobuf::core::parse_from_bytes(&bytes)?;
Ok(Some(From::from(response.get_address())))
}
_ => Ok(None),
}
}
fn open_path<R, F>(&self, f: F) -> Result<R, Error>
where F: Fn() -> Result<R, &'static str>
{
f().map_err(Into::into)
}
fn open_path<R, F>(&self, f: F) -> Result<R, Error>
where
F: Fn() -> Result<R, &'static str>,
{
f().map_err(Into::into)
}
}
/// Poll the device for at most `max_polling_duration`, returning `true` as soon as an update succeeds
pub fn try_connect_polling(trezor: &Manager, duration: &Duration, dir: DeviceDirection) -> bool {
let start_time = Instant::now();
while start_time.elapsed() <= *duration {
if let Ok(num_devices) = trezor.update_devices(dir) {
trace!(target: "hw", "{} Trezor devices {}", num_devices, dir);
return true
}
}
false
let start_time = Instant::now();
while start_time.elapsed() <= *duration {
if let Ok(num_devices) = trezor.update_devices(dir) {
trace!(target: "hw", "{} Trezor devices {}", num_devices, dir);
return true;
}
}
false
}
/// Check if the detected device is a Trezor device by checking both the product ID and the vendor ID
pub fn is_valid_trezor(vid: u16, pid: u16) -> bool {
vid == TREZOR_VID && TREZOR_PIDS.contains(&pid)
vid == TREZOR_VID && TREZOR_PIDS.contains(&pid)
}
#[test]
@@ -433,31 +502,39 @@ pub fn is_valid_trezor(vid: u16, pid: u16) -> bool {
/// This test can't be run without an actual Trezor device connected
/// (and unlocked) to the machine that's running the test
fn test_signature() {
use ethereum_types::Address;
use MAX_POLLING_DURATION;
use super::HardwareWalletManager;
use super::HardwareWalletManager;
use ethereum_types::Address;
use MAX_POLLING_DURATION;
let manager = HardwareWalletManager::new().unwrap();
let manager = HardwareWalletManager::new().unwrap();
assert_eq!(try_connect_polling(&manager.trezor, &MAX_POLLING_DURATION, DeviceDirection::Arrived), true);
assert_eq!(
try_connect_polling(
&manager.trezor,
&MAX_POLLING_DURATION,
DeviceDirection::Arrived
),
true
);
let addr: Address = manager.list_wallets()
.iter()
.filter(|d| d.name == "TREZOR".to_string() && d.manufacturer == "SatoshiLabs".to_string())
.nth(0)
.map(|d| d.address)
.unwrap();
let addr: Address = manager
.list_wallets()
.iter()
.filter(|d| d.name == "TREZOR".to_string() && d.manufacturer == "SatoshiLabs".to_string())
.nth(0)
.map(|d| d.address)
.unwrap();
let t_info = TransactionInfo {
nonce: U256::from(1),
gas_price: U256::from(100),
gas_limit: U256::from(21_000),
to: Some(Address::from(1337)),
chain_id: Some(1),
value: U256::from(1_000_000),
data: (&[1u8; 3000]).to_vec(),
};
let t_info = TransactionInfo {
nonce: U256::from(1),
gas_price: U256::from(100),
gas_limit: U256::from(21_000),
to: Some(Address::from(1337)),
chain_id: Some(1),
value: U256::from(1_000_000),
data: (&[1u8; 3000]).to_vec(),
};
let signature = manager.trezor.sign_transaction(&addr, &t_info);
assert!(signature.is_ok());
let signature = manager.trezor.sign_transaction(&addr, &t_info);
assert!(signature.is_ok());
}


@@ -16,58 +16,56 @@
//! Account Metadata
use std::{
collections::HashMap,
time::Instant,
};
use std::{collections::HashMap, time::Instant};
use ethkey::{Address, Password};
use serde_derive::{Serialize, Deserialize};
use serde_derive::{Deserialize, Serialize};
use serde_json;
/// Type of unlock.
#[derive(Clone, PartialEq)]
pub enum Unlock {
/// If account is unlocked temporarily, it should be locked after first usage.
OneTime,
/// Account unlocked permanently can always sign message.
/// Use with caution.
Perm,
/// Account unlocked with a timeout
Timed(Instant),
/// If account is unlocked temporarily, it should be locked after first usage.
OneTime,
/// Account unlocked permanently can always sign message.
/// Use with caution.
Perm,
/// Account unlocked with a timeout
Timed(Instant),
}
/// Data associated with account.
#[derive(Clone)]
pub struct AccountData {
pub unlock: Unlock,
pub password: Password,
pub unlock: Unlock,
pub password: Password,
}
/// Collected account metadata
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountMeta {
/// The name of the account.
pub name: String,
/// The rest of the metadata of the account.
pub meta: String,
/// The 128-bit Uuid of the account, if it has one (brain-wallets don't).
pub uuid: Option<String>,
/// The name of the account.
pub name: String,
/// The rest of the metadata of the account.
pub meta: String,
/// The 128-bit Uuid of the account, if it has one (brain-wallets don't).
pub uuid: Option<String>,
}
impl AccountMeta {
/// Read a hash map of Address -> AccountMeta
pub fn read<R>(reader: R) -> Result<HashMap<Address, Self>, serde_json::Error> where
R: ::std::io::Read,
{
serde_json::from_reader(reader)
}
/// Read a hash map of Address -> AccountMeta
pub fn read<R>(reader: R) -> Result<HashMap<Address, Self>, serde_json::Error>
where
R: ::std::io::Read,
{
serde_json::from_reader(reader)
}
/// Write a hash map of Address -> AccountMeta
pub fn write<W>(m: &HashMap<Address, Self>, writer: &mut W) -> Result<(), serde_json::Error> where
W: ::std::io::Write,
{
serde_json::to_writer(writer, m)
}
/// Write a hash map of Address -> AccountMeta
pub fn write<W>(m: &HashMap<Address, Self>, writer: &mut W) -> Result<(), serde_json::Error>
where
W: ::std::io::Write,
{
serde_json::to_writer(writer, m)
}
}


@@ -16,41 +16,41 @@
use std::fmt;
use ethstore::{Error as SSError};
use hardware_wallet::{Error as HardwareError};
use ethstore::Error as SSError;
use hardware_wallet::Error as HardwareError;
/// Signing error
#[derive(Debug)]
pub enum SignError {
/// Account is not unlocked
NotUnlocked,
/// Account does not exist.
NotFound,
/// Low-level hardware device error.
Hardware(HardwareError),
/// Low-level error from store
SStore(SSError),
/// Account is not unlocked
NotUnlocked,
/// Account does not exist.
NotFound,
/// Low-level hardware device error.
Hardware(HardwareError),
/// Low-level error from store
SStore(SSError),
}
impl fmt::Display for SignError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
SignError::NotUnlocked => write!(f, "Account is locked"),
SignError::NotFound => write!(f, "Account does not exist"),
SignError::Hardware(ref e) => write!(f, "{}", e),
SignError::SStore(ref e) => write!(f, "{}", e),
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
SignError::NotUnlocked => write!(f, "Account is locked"),
SignError::NotFound => write!(f, "Account does not exist"),
SignError::Hardware(ref e) => write!(f, "{}", e),
SignError::SStore(ref e) => write!(f, "{}", e),
}
}
}
impl From<HardwareError> for SignError {
fn from(e: HardwareError) -> Self {
SignError::Hardware(e)
}
fn from(e: HardwareError) -> Self {
SignError::Hardware(e)
}
}
impl From<SSError> for SignError {
fn from(e: SSError) -> Self {
SignError::SStore(e)
}
fn from(e: SSError) -> Self {
SignError::SStore(e)
}
}

File diff suppressed because it is too large


@@ -16,9 +16,11 @@
//! Address Book Store
use std::{fs, fmt, hash, ops};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::{
collections::HashMap,
fmt, fs, hash, ops,
path::{Path, PathBuf},
};
use ethkey::Address;
use log::{trace, warn};
@@ -27,163 +29,209 @@ use crate::AccountMeta;
/// Disk-backed map from Address to AccountMeta. Uses JSON.
pub struct AddressBook {
cache: DiskMap<Address, AccountMeta>,
cache: DiskMap<Address, AccountMeta>,
}
impl AddressBook {
/// Creates new address book at given directory.
pub fn new(path: &Path) -> Self {
let mut r = AddressBook {
cache: DiskMap::new(path, "address_book.json")
};
r.cache.revert(AccountMeta::read);
r
}
/// Creates new address book at given directory.
pub fn new(path: &Path) -> Self {
let mut r = AddressBook {
cache: DiskMap::new(path, "address_book.json"),
};
r.cache.revert(AccountMeta::read);
r
}
/// Creates transient address book (no changes are saved to disk).
pub fn transient() -> Self {
AddressBook {
cache: DiskMap::transient()
}
}
/// Creates transient address book (no changes are saved to disk).
pub fn transient() -> Self {
AddressBook {
cache: DiskMap::transient(),
}
}
/// Get the address book.
pub fn get(&self) -> HashMap<Address, AccountMeta> {
self.cache.clone()
}
/// Get the address book.
pub fn get(&self) -> HashMap<Address, AccountMeta> {
self.cache.clone()
}
fn save(&self) {
self.cache.save(AccountMeta::write)
}
fn save(&self) {
self.cache.save(AccountMeta::write)
}
/// Sets new name for given address.
pub fn set_name(&mut self, a: Address, name: String) {
{
let x = self.cache.entry(a)
.or_insert_with(|| AccountMeta {name: Default::default(), meta: "{}".to_owned(), uuid: None});
x.name = name;
}
self.save();
}
/// Sets new name for given address.
pub fn set_name(&mut self, a: Address, name: String) {
{
let x = self.cache.entry(a).or_insert_with(|| AccountMeta {
name: Default::default(),
meta: "{}".to_owned(),
uuid: None,
});
x.name = name;
}
self.save();
}
/// Sets new meta for given address.
pub fn set_meta(&mut self, a: Address, meta: String) {
{
let x = self.cache.entry(a)
.or_insert_with(|| AccountMeta {name: "Anonymous".to_owned(), meta: Default::default(), uuid: None});
x.meta = meta;
}
self.save();
}
/// Sets new meta for given address.
pub fn set_meta(&mut self, a: Address, meta: String) {
{
let x = self.cache.entry(a).or_insert_with(|| AccountMeta {
name: "Anonymous".to_owned(),
meta: Default::default(),
uuid: None,
});
x.meta = meta;
}
self.save();
}
/// Removes an entry
pub fn remove(&mut self, a: Address) {
self.cache.remove(&a);
self.save();
}
/// Removes an entry
pub fn remove(&mut self, a: Address) {
self.cache.remove(&a);
self.save();
}
}
/// Disk-serializable HashMap
#[derive(Debug)]
struct DiskMap<K: hash::Hash + Eq, V> {
path: PathBuf,
cache: HashMap<K, V>,
transient: bool,
path: PathBuf,
cache: HashMap<K, V>,
transient: bool,
}
impl<K: hash::Hash + Eq, V> ops::Deref for DiskMap<K, V> {
type Target = HashMap<K, V>;
fn deref(&self) -> &Self::Target {
&self.cache
}
type Target = HashMap<K, V>;
fn deref(&self) -> &Self::Target {
&self.cache
}
}
impl<K: hash::Hash + Eq, V> ops::DerefMut for DiskMap<K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.cache
}
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.cache
}
}
impl<K: hash::Hash + Eq, V> DiskMap<K, V> {
pub fn new(path: &Path, file_name: &str) -> Self {
let mut path = path.to_owned();
path.push(file_name);
trace!(target: "diskmap", "path={:?}", path);
DiskMap {
path: path,
cache: HashMap::new(),
transient: false,
}
}
pub fn new(path: &Path, file_name: &str) -> Self {
let mut path = path.to_owned();
path.push(file_name);
trace!(target: "diskmap", "path={:?}", path);
DiskMap {
path: path,
cache: HashMap::new(),
transient: false,
}
}
pub fn transient() -> Self {
let mut map = DiskMap::new(&PathBuf::new(), "diskmap.json".into());
map.transient = true;
map
}
pub fn transient() -> Self {
let mut map = DiskMap::new(&PathBuf::new(), "diskmap.json".into());
map.transient = true;
map
}
fn revert<F, E>(&mut self, read: F) where
F: Fn(fs::File) -> Result<HashMap<K, V>, E>,
E: fmt::Display,
{
if self.transient { return; }
trace!(target: "diskmap", "revert {:?}", self.path);
let _ = fs::File::open(self.path.clone())
.map_err(|e| trace!(target: "diskmap", "Couldn't open disk map: {}", e))
.and_then(|f| read(f).map_err(|e| warn!(target: "diskmap", "Couldn't read disk map: {}", e)))
.and_then(|m| {
self.cache = m;
Ok(())
});
}
fn revert<F, E>(&mut self, read: F)
where
F: Fn(fs::File) -> Result<HashMap<K, V>, E>,
E: fmt::Display,
{
if self.transient {
return;
}
trace!(target: "diskmap", "revert {:?}", self.path);
let _ = fs::File::open(self.path.clone())
.map_err(|e| trace!(target: "diskmap", "Couldn't open disk map: {}", e))
.and_then(|f| {
read(f).map_err(|e| warn!(target: "diskmap", "Couldn't read disk map: {}", e))
})
.and_then(|m| {
self.cache = m;
Ok(())
});
}
fn save<F, E>(&self, write: F) where
F: Fn(&HashMap<K, V>, &mut fs::File) -> Result<(), E>,
E: fmt::Display,
{
if self.transient { return; }
trace!(target: "diskmap", "save {:?}", self.path);
let _ = fs::File::create(self.path.clone())
.map_err(|e| warn!(target: "diskmap", "Couldn't open disk map for writing: {}", e))
.and_then(|mut f| {
write(&self.cache, &mut f).map_err(|e| warn!(target: "diskmap", "Couldn't write to disk map: {}", e))
});
}
fn save<F, E>(&self, write: F)
where
F: Fn(&HashMap<K, V>, &mut fs::File) -> Result<(), E>,
E: fmt::Display,
{
if self.transient {
return;
}
trace!(target: "diskmap", "save {:?}", self.path);
let _ = fs::File::create(self.path.clone())
.map_err(|e| warn!(target: "diskmap", "Couldn't open disk map for writing: {}", e))
.and_then(|mut f| {
write(&self.cache, &mut f)
.map_err(|e| warn!(target: "diskmap", "Couldn't write to disk map: {}", e))
});
}
}
#[cfg(test)]
mod tests {
use super::AddressBook;
use std::collections::HashMap;
use tempdir::TempDir;
use crate::account_data::AccountMeta;
use super::AddressBook;
use crate::account_data::AccountMeta;
use std::collections::HashMap;
use tempdir::TempDir;
#[test]
fn should_save_and_reload_address_book() {
let tempdir = TempDir::new("").unwrap();
let mut b = AddressBook::new(tempdir.path());
b.set_name(1.into(), "One".to_owned());
b.set_meta(1.into(), "{1:1}".to_owned());
let b = AddressBook::new(tempdir.path());
assert_eq!(b.get(), vec![
(1, AccountMeta {name: "One".to_owned(), meta: "{1:1}".to_owned(), uuid: None})
].into_iter().map(|(a, b)| (a.into(), b)).collect::<HashMap<_, _>>());
}
#[test]
fn should_save_and_reload_address_book() {
let tempdir = TempDir::new("").unwrap();
let mut b = AddressBook::new(tempdir.path());
b.set_name(1.into(), "One".to_owned());
b.set_meta(1.into(), "{1:1}".to_owned());
let b = AddressBook::new(tempdir.path());
assert_eq!(
b.get(),
vec![(
1,
AccountMeta {
name: "One".to_owned(),
meta: "{1:1}".to_owned(),
uuid: None
}
)]
.into_iter()
.map(|(a, b)| (a.into(), b))
.collect::<HashMap<_, _>>()
);
}
#[test]
fn should_remove_address() {
let tempdir = TempDir::new("").unwrap();
let mut b = AddressBook::new(tempdir.path());
#[test]
fn should_remove_address() {
let tempdir = TempDir::new("").unwrap();
let mut b = AddressBook::new(tempdir.path());
b.set_name(1.into(), "One".to_owned());
b.set_name(2.into(), "Two".to_owned());
b.set_name(3.into(), "Three".to_owned());
b.remove(2.into());
b.set_name(1.into(), "One".to_owned());
b.set_name(2.into(), "Two".to_owned());
b.set_name(3.into(), "Three".to_owned());
b.remove(2.into());
let b = AddressBook::new(tempdir.path());
assert_eq!(b.get(), vec![
(1, AccountMeta{name: "One".to_owned(), meta: "{}".to_owned(), uuid: None}),
(3, AccountMeta{name: "Three".to_owned(), meta: "{}".to_owned(), uuid: None}),
].into_iter().map(|(a, b)| (a.into(), b)).collect::<HashMap<_, _>>());
}
let b = AddressBook::new(tempdir.path());
assert_eq!(
b.get(),
vec![
(
1,
AccountMeta {
name: "One".to_owned(),
meta: "{}".to_owned(),
uuid: None
}
),
(
3,
AccountMeta {
name: "Three".to_owned(),
meta: "{}".to_owned(),
uuid: None
}
),
]
.into_iter()
.map(|(a, b)| (a.into(), b))
.collect::<HashMap<_, _>>()
);
}
}

View File

@@ -14,36 +14,38 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
extern crate serde_json;
extern crate ethjson;
extern crate serde_json;
use std::{fs, env, process};
use ethjson::spec::Spec;
use std::{env, fs, process};
fn quit(s: &str) -> ! {
println!("{}", s);
process::exit(1);
println!("{}", s);
process::exit(1);
}
fn main() {
let mut args = env::args();
if args.len() != 2 {
quit("You need to specify chainspec.json\n\
let mut args = env::args();
if args.len() != 2 {
quit(
"You need to specify chainspec.json\n\
\n\
./chainspec <chainspec.json>");
}
./chainspec <chainspec.json>",
);
}
let path = args.nth(1).expect("args.len() == 2; qed");
let file = match fs::File::open(&path) {
Ok(file) => file,
Err(_) => quit(&format!("{} could not be opened", path)),
};
let path = args.nth(1).expect("args.len() == 2; qed");
let file = match fs::File::open(&path) {
Ok(file) => file,
Err(_) => quit(&format!("{} could not be opened", path)),
};
let spec: Result<Spec, _> = serde_json::from_reader(file);
let spec: Result<Spec, _> = serde_json::from_reader(file);
if let Err(err) = spec {
quit(&format!("{} {}", path, err.to_string()));
}
if let Err(err) = spec {
quit(&format!("{} {}", path, err.to_string()));
}
println!("{} is valid", path);
println!("{} is valid", path);
}

View File

@@ -14,334 +14,314 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::fmt::{Debug, Formatter, Error as FmtError};
use std::io::{BufReader, BufRead};
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::collections::BTreeMap;
use std::thread;
use std::time;
use std::{
collections::BTreeMap,
fmt::{Debug, Error as FmtError, Formatter},
io::{BufRead, BufReader},
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
thread, time,
};
use std::path::PathBuf;
use hash::keccak;
use parking_lot::Mutex;
use std::{fs::File, path::PathBuf};
use url::Url;
use std::fs::File;
use ws::ws::{
self,
Request,
Handler,
Sender,
Handshake,
Error as WsError,
ErrorKind as WsErrorKind,
Message,
Result as WsResult,
self, Error as WsError, ErrorKind as WsErrorKind, Handler, Handshake, Message, Request,
Result as WsResult, Sender,
};
use serde::de::DeserializeOwned;
use serde_json::{
self as json,
Value as JsonValue,
Error as JsonError,
use serde_json::{self as json, Error as JsonError, Value as JsonValue};
use futures::{done, oneshot, Canceled, Complete, Future};
use jsonrpc_core::{
request::MethodCall,
response::{Failure, Output, Success},
Error as JsonRpcError, Id, Params, Version,
};
use futures::{Canceled, Complete, Future, oneshot, done};
use jsonrpc_core::{Id, Version, Params, Error as JsonRpcError};
use jsonrpc_core::request::MethodCall;
use jsonrpc_core::response::{Output, Success, Failure};
use BoxFuture;
/// The actual websocket connection handler, passed into the
/// event loop of ws-rs
struct RpcHandler {
pending: Pending,
// Option is used here as temporary storage until connection
// is set up and the values are moved into the new `Rpc`
complete: Option<Complete<Result<Rpc, RpcError>>>,
auth_code: String,
out: Option<Sender>,
pending: Pending,
// Option is used here as temporary storage until connection
// is set up and the values are moved into the new `Rpc`
complete: Option<Complete<Result<Rpc, RpcError>>>,
auth_code: String,
out: Option<Sender>,
}
impl RpcHandler {
fn new(
out: Sender,
auth_code: String,
complete: Complete<Result<Rpc, RpcError>>
) -> Self {
RpcHandler {
out: Some(out),
auth_code: auth_code,
pending: Pending::new(),
complete: Some(complete),
}
}
fn new(out: Sender, auth_code: String, complete: Complete<Result<Rpc, RpcError>>) -> Self {
RpcHandler {
out: Some(out),
auth_code: auth_code,
pending: Pending::new(),
complete: Some(complete),
}
}
}
impl Handler for RpcHandler {
fn build_request(&mut self, url: &Url) -> WsResult<Request> {
match Request::from_url(url) {
Ok(mut r) => {
let timestamp = time::UNIX_EPOCH.elapsed().map_err(|err| {
WsError::new(WsErrorKind::Internal, format!("{}", err))
})?;
let secs = timestamp.as_secs();
let hashed = keccak(format!("{}:{}", self.auth_code, secs));
let proto = format!("{:x}_{}", hashed, secs);
r.add_protocol(&proto);
Ok(r)
},
Err(e) =>
Err(WsError::new(WsErrorKind::Internal, format!("{}", e))),
}
}
fn on_error(&mut self, err: WsError) {
match self.complete.take() {
Some(c) => match c.send(Err(RpcError::WsError(err))) {
Ok(_) => {},
Err(_) => warn!(target: "rpc-client", "Unable to notify about error."),
},
None => warn!(target: "rpc-client", "unexpected error: {}", err),
}
}
fn on_open(&mut self, _: Handshake) -> WsResult<()> {
match (self.complete.take(), self.out.take()) {
(Some(c), Some(out)) => {
let res = c.send(Ok(Rpc {
out: out,
counter: AtomicUsize::new(0),
pending: self.pending.clone(),
}));
if let Err(_) = res {
warn!(target: "rpc-client", "Unable to open a connection.")
}
Ok(())
},
_ => {
let msg = format!("on_open called twice");
Err(WsError::new(WsErrorKind::Internal, msg))
}
}
}
fn on_message(&mut self, msg: Message) -> WsResult<()> {
let ret: Result<JsonValue, JsonRpcError>;
let response_id;
let string = &msg.to_string();
match json::from_str::<Output>(&string) {
Ok(Output::Success(Success { result, id: Id::Num(id), .. })) =>
{
ret = Ok(result);
response_id = id as usize;
}
Ok(Output::Failure(Failure { error, id: Id::Num(id), .. })) => {
ret = Err(error);
response_id = id as usize;
}
Err(e) => {
warn!(
target: "rpc-client",
"recieved invalid message: {}\n {:?}",
string,
e
);
return Ok(())
},
_ => {
warn!(
target: "rpc-client",
"recieved invalid message: {}",
string
);
return Ok(())
}
}
fn build_request(&mut self, url: &Url) -> WsResult<Request> {
match Request::from_url(url) {
Ok(mut r) => {
let timestamp = time::UNIX_EPOCH
.elapsed()
.map_err(|err| WsError::new(WsErrorKind::Internal, format!("{}", err)))?;
let secs = timestamp.as_secs();
let hashed = keccak(format!("{}:{}", self.auth_code, secs));
let proto = format!("{:x}_{}", hashed, secs);
r.add_protocol(&proto);
Ok(r)
}
Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("{}", e))),
}
}
fn on_error(&mut self, err: WsError) {
match self.complete.take() {
Some(c) => match c.send(Err(RpcError::WsError(err))) {
Ok(_) => {}
Err(_) => warn!(target: "rpc-client", "Unable to notify about error."),
},
None => warn!(target: "rpc-client", "unexpected error: {}", err),
}
}
fn on_open(&mut self, _: Handshake) -> WsResult<()> {
match (self.complete.take(), self.out.take()) {
(Some(c), Some(out)) => {
let res = c.send(Ok(Rpc {
out: out,
counter: AtomicUsize::new(0),
pending: self.pending.clone(),
}));
if let Err(_) = res {
warn!(target: "rpc-client", "Unable to open a connection.")
}
Ok(())
}
_ => {
let msg = format!("on_open called twice");
Err(WsError::new(WsErrorKind::Internal, msg))
}
}
}
fn on_message(&mut self, msg: Message) -> WsResult<()> {
let ret: Result<JsonValue, JsonRpcError>;
let response_id;
let string = &msg.to_string();
match json::from_str::<Output>(&string) {
Ok(Output::Success(Success {
result,
id: Id::Num(id),
..
})) => {
ret = Ok(result);
response_id = id as usize;
}
Ok(Output::Failure(Failure {
error,
id: Id::Num(id),
..
})) => {
ret = Err(error);
response_id = id as usize;
}
Err(e) => {
warn!(
target: "rpc-client",
"recieved invalid message: {}\n {:?}",
string,
e
);
return Ok(());
}
_ => {
warn!(
target: "rpc-client",
"recieved invalid message: {}",
string
);
return Ok(());
}
}
match self.pending.remove(response_id) {
Some(c) => if let Err(_) = c.send(ret.map_err(|err| RpcError::JsonRpc(err))) {
warn!(target: "rpc-client", "Unable to send response.")
},
None => warn!(
target: "rpc-client",
"warning: unexpected id: {}",
response_id
),
}
Ok(())
}
match self.pending.remove(response_id) {
Some(c) => {
if let Err(_) = c.send(ret.map_err(|err| RpcError::JsonRpc(err))) {
warn!(target: "rpc-client", "Unable to send response.")
}
}
None => warn!(
target: "rpc-client",
"warning: unexpected id: {}",
response_id
),
}
Ok(())
}
}
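For reference, the protocol string assembled in `build_request` above is keccak of `<auth code>:<unix seconds>` rendered as hex, joined to the timestamp with an underscore. A standalone sketch of that derivation, assuming the `keccak-hash` crate (imported as `hash` in this file); this is an illustration, not code from the diff:

extern crate keccak_hash;

use std::time::{SystemTime, UNIX_EPOCH};

// Rebuild the websocket sub-protocol token the signer handshake expects.
fn handshake_protocol(auth_code: &str) -> String {
    let secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time is after the unix epoch; qed")
        .as_secs();
    let hashed = keccak_hash::keccak(format!("{}:{}", auth_code, secs));
    format!("{:x}_{}", hashed, secs)
}

fn main() {
    println!("{}", handshake_protocol("example-auth-code"));
}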
/// Keeping track of issued requests to be matched up with responses
#[derive(Clone)]
struct Pending(
Arc<Mutex<BTreeMap<usize, Complete<Result<JsonValue, RpcError>>>>>
);
struct Pending(Arc<Mutex<BTreeMap<usize, Complete<Result<JsonValue, RpcError>>>>>);
impl Pending {
fn new() -> Self {
Pending(Arc::new(Mutex::new(BTreeMap::new())))
}
fn insert(&mut self, k: usize, v: Complete<Result<JsonValue, RpcError>>) {
self.0.lock().insert(k, v);
}
fn remove(
&mut self,
k: usize
) -> Option<Complete<Result<JsonValue, RpcError>>> {
self.0.lock().remove(&k)
}
fn new() -> Self {
Pending(Arc::new(Mutex::new(BTreeMap::new())))
}
fn insert(&mut self, k: usize, v: Complete<Result<JsonValue, RpcError>>) {
self.0.lock().insert(k, v);
}
fn remove(&mut self, k: usize) -> Option<Complete<Result<JsonValue, RpcError>>> {
self.0.lock().remove(&k)
}
}
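`Pending` is what correlates responses with requests: `Rpc::request` allocates an id, stores a completer under it, and `on_message` removes and fires that completer when a response carrying the same id arrives. A rough analogue of the bookkeeping using only the standard library (illustrative, not this crate's API):

use std::{collections::BTreeMap, sync::mpsc};

fn main() {
    // id -> where the eventual response should be delivered
    let mut pending: BTreeMap<usize, mpsc::Sender<String>> = BTreeMap::new();

    // "request": remember the waiter under a fresh id
    let (tx, rx) = mpsc::channel();
    let id = 0usize;
    pending.insert(id, tx);

    // "on_message": a response arrives carrying the same id; look the waiter up
    let response_id = 0usize;
    if let Some(waiter) = pending.remove(&response_id) {
        let _ = waiter.send("pong".to_owned());
    }

    assert_eq!(rx.recv().unwrap(), "pong");
}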
fn get_authcode(path: &PathBuf) -> Result<String, RpcError> {
if let Ok(fd) = File::open(path) {
if let Some(Ok(line)) = BufReader::new(fd).lines().next() {
let mut parts = line.split(';');
let token = parts.next();
if let Ok(fd) = File::open(path) {
if let Some(Ok(line)) = BufReader::new(fd).lines().next() {
let mut parts = line.split(';');
let token = parts.next();
if let Some(code) = token {
return Ok(code.into());
}
}
}
Err(RpcError::NoAuthCode)
if let Some(code) = token {
return Ok(code.into());
}
}
}
Err(RpcError::NoAuthCode)
}
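`get_authcode` reads only the first line of the auth file and keeps the text before the first `;`. A small self-contained demonstration of that parsing; the file name and contents below are made up for illustration:

use std::{
    fs,
    io::{BufRead, BufReader},
};

fn main() -> std::io::Result<()> {
    let path = std::env::temp_dir().join("authcodes-example");
    // Hypothetical contents: a token, then extra fields separated by ';'
    fs::write(&path, "a1b2c3d4;1596600000\n")?;

    let fd = fs::File::open(&path)?;
    if let Some(Ok(line)) = BufReader::new(fd).lines().next() {
        let code = line.split(';').next().unwrap_or("");
        assert_eq!(code, "a1b2c3d4");
        println!("auth code: {}", code);
    }
    Ok(())
}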
/// The handle to the connection
pub struct Rpc {
out: Sender,
counter: AtomicUsize,
pending: Pending,
out: Sender,
counter: AtomicUsize,
pending: Pending,
}
impl Rpc {
/// Blocking, returns a new initialized connection or RpcError
pub fn new(url: &str, authpath: &PathBuf) -> Result<Self, RpcError> {
let rpc = Self::connect(url, authpath).map(|rpc| rpc).wait()?;
rpc
}
/// Blocking, returns a new initialized connection or RpcError
pub fn new(url: &str, authpath: &PathBuf) -> Result<Self, RpcError> {
let rpc = Self::connect(url, authpath).map(|rpc| rpc).wait()?;
rpc
}
/// Non-blocking, returns a future
pub fn connect(
url: &str, authpath: &PathBuf
) -> BoxFuture<Result<Self, RpcError>, Canceled> {
let (c, p) = oneshot::<Result<Self, RpcError>>();
match get_authcode(authpath) {
Err(e) => return Box::new(done(Ok(Err(e)))),
Ok(code) => {
let url = String::from(url);
// The ws::connect takes a FnMut closure, which means c cannot
// be moved into it, since it's consumed on complete.
// Therefore we wrap it in an option and pick it out once.
let mut once = Some(c);
thread::spawn(move || {
let conn = ws::connect(url, |out| {
// this will panic if the closure is called twice,
// which it should never be.
let c = once.take()
.expect("connection closure called only once");
RpcHandler::new(out, code.clone(), c)
});
match conn {
Err(err) => {
// since ws::connect is only called once, it cannot
// both fail and succeed.
let c = once.take()
.expect("connection closure called only once");
let _ = c.send(Err(RpcError::WsError(err)));
},
// c will complete on the `on_open` event in the Handler
_ => ()
}
});
Box::new(p)
}
}
}
/// Non-blocking, returns a future
pub fn connect(url: &str, authpath: &PathBuf) -> BoxFuture<Result<Self, RpcError>, Canceled> {
let (c, p) = oneshot::<Result<Self, RpcError>>();
match get_authcode(authpath) {
Err(e) => return Box::new(done(Ok(Err(e)))),
Ok(code) => {
let url = String::from(url);
// The ws::connect takes a FnMut closure, which means c cannot
// be moved into it, since it's consumed on complete.
// Therefore we wrap it in an option and pick it out once.
let mut once = Some(c);
thread::spawn(move || {
let conn = ws::connect(url, |out| {
// this will panic if the closure is called twice,
// which it should never be.
let c = once.take().expect("connection closure called only once");
RpcHandler::new(out, code.clone(), c)
});
match conn {
Err(err) => {
// since ws::connect is only called once, it cannot
// both fail and succeed.
let c = once.take().expect("connection closure called only once");
let _ = c.send(Err(RpcError::WsError(err)));
}
// c will complete on the `on_open` event in the Handler
_ => (),
}
});
Box::new(p)
}
}
}
/// Non-blocking, returns a future of the request response
pub fn request<T>(
&mut self, method: &'static str, params: Vec<JsonValue>
) -> BoxFuture<Result<T, RpcError>, Canceled>
where T: DeserializeOwned + Send + Sized {
/// Non-blocking, returns a future of the request response
pub fn request<T>(
&mut self,
method: &'static str,
params: Vec<JsonValue>,
) -> BoxFuture<Result<T, RpcError>, Canceled>
where
T: DeserializeOwned + Send + Sized,
{
let (c, p) = oneshot::<Result<JsonValue, RpcError>>();
let (c, p) = oneshot::<Result<JsonValue, RpcError>>();
let id = self.counter.fetch_add(1, Ordering::Relaxed);
self.pending.insert(id, c);
let id = self.counter.fetch_add(1, Ordering::Relaxed);
self.pending.insert(id, c);
let request = MethodCall {
jsonrpc: Some(Version::V2),
method: method.to_owned(),
params: Params::Array(params),
id: Id::Num(id as u64),
};
let request = MethodCall {
jsonrpc: Some(Version::V2),
method: method.to_owned(),
params: Params::Array(params),
id: Id::Num(id as u64),
};
let serialized = json::to_string(&request).expect("request is serializable");
let _ = self.out.send(serialized);
let serialized = json::to_string(&request)
.expect("request is serializable");
let _ = self.out.send(serialized);
Box::new(p.map(|result| {
match result {
Ok(json) => {
let t: T = json::from_value(json)?;
Ok(t)
},
Err(err) => Err(err)
}
}))
}
Box::new(p.map(|result| match result {
Ok(json) => {
let t: T = json::from_value(json)?;
Ok(t)
}
Err(err) => Err(err),
}))
}
}
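The `Option`-wrapping trick commented inside `Rpc::connect` is a general pattern: an `FnMut` closure cannot move a captured value out of itself, so the value is wrapped in an `Option` and taken on the first call. A standalone sketch of that take-once pattern (not part of the diff):

fn main() {
    let completer = String::from("consumed exactly once");
    let mut once = Some(completer);

    // An FnMut closure may run many times, so it can only *take* the value once.
    let mut on_event = |n: u32| match once.take() {
        Some(c) => println!("call {} got ownership of: {}", n, c),
        None => println!("call {} found it already taken", n),
    };

    on_event(1);
    on_event(2);
}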
pub enum RpcError {
WrongVersion(String),
ParseError(JsonError),
MalformedResponse(String),
JsonRpc(JsonRpcError),
WsError(WsError),
Canceled(Canceled),
UnexpectedId,
NoAuthCode,
WrongVersion(String),
ParseError(JsonError),
MalformedResponse(String),
JsonRpc(JsonRpcError),
WsError(WsError),
Canceled(Canceled),
UnexpectedId,
NoAuthCode,
}
impl Debug for RpcError {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
match *self {
RpcError::WrongVersion(ref s)
=> write!(f, "Expected version 2.0, got {}", s),
RpcError::ParseError(ref err)
=> write!(f, "ParseError: {}", err),
RpcError::MalformedResponse(ref s)
=> write!(f, "Malformed response: {}", s),
RpcError::JsonRpc(ref json)
=> write!(f, "JsonRpc error: {:?}", json),
RpcError::WsError(ref s)
=> write!(f, "Websocket error: {}", s),
RpcError::Canceled(ref s)
=> write!(f, "Futures error: {:?}", s),
RpcError::UnexpectedId
=> write!(f, "Unexpected response id"),
RpcError::NoAuthCode
=> write!(f, "No authcodes available"),
}
}
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
match *self {
RpcError::WrongVersion(ref s) => write!(f, "Expected version 2.0, got {}", s),
RpcError::ParseError(ref err) => write!(f, "ParseError: {}", err),
RpcError::MalformedResponse(ref s) => write!(f, "Malformed response: {}", s),
RpcError::JsonRpc(ref json) => write!(f, "JsonRpc error: {:?}", json),
RpcError::WsError(ref s) => write!(f, "Websocket error: {}", s),
RpcError::Canceled(ref s) => write!(f, "Futures error: {:?}", s),
RpcError::UnexpectedId => write!(f, "Unexpected response id"),
RpcError::NoAuthCode => write!(f, "No authcodes available"),
}
}
}
impl From<JsonError> for RpcError {
fn from(err: JsonError) -> RpcError {
RpcError::ParseError(err)
}
fn from(err: JsonError) -> RpcError {
RpcError::ParseError(err)
}
}
impl From<WsError> for RpcError {
fn from(err: WsError) -> RpcError {
RpcError::WsError(err)
}
fn from(err: WsError) -> RpcError {
RpcError::WsError(err)
}
}
impl From<Canceled> for RpcError {
fn from(err: Canceled) -> RpcError {
RpcError::Canceled(err)
}
fn from(err: Canceled) -> RpcError {
RpcError::Canceled(err)
}
}

View File

@@ -21,12 +21,12 @@ extern crate ethereum_types;
extern crate futures;
extern crate jsonrpc_core;
extern crate jsonrpc_ws_server as ws;
extern crate keccak_hash as hash;
extern crate parity_rpc as rpc;
extern crate parking_lot;
extern crate serde;
extern crate serde_json;
extern crate url;
extern crate keccak_hash as hash;
#[macro_use]
extern crate log;
@@ -36,56 +36,55 @@ extern crate log;
extern crate matches;
/// Boxed future response.
pub type BoxFuture<T, E> = Box<futures::Future<Item=T, Error=E> + Send>;
pub type BoxFuture<T, E> = Box<futures::Future<Item = T, Error = E> + Send>;
#[cfg(test)]
mod tests {
use futures::Future;
use std::path::PathBuf;
use client::{Rpc, RpcError};
use rpc;
use client::{Rpc, RpcError};
use futures::Future;
use rpc;
use std::path::PathBuf;
#[test]
fn test_connection_refused() {
let (_srv, port, mut authcodes) = rpc::tests::ws::serve();
#[test]
fn test_connection_refused() {
let (_srv, port, mut authcodes) = rpc::tests::ws::serve();
let _ = authcodes.generate_new();
authcodes.to_file(&authcodes.path).unwrap();
let _ = authcodes.generate_new();
authcodes.to_file(&authcodes.path).unwrap();
let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port - 1),
&authcodes.path);
let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port - 1), &authcodes.path);
let _ = connect.map(|conn| {
assert!(matches!(&conn, &Err(RpcError::WsError(_))));
}).wait();
}
let _ = connect
.map(|conn| {
assert!(matches!(&conn, &Err(RpcError::WsError(_))));
})
.wait();
}
#[test]
fn test_authcode_fail() {
let (_srv, port, _) = rpc::tests::ws::serve();
let path = PathBuf::from("nonexist");
#[test]
fn test_authcode_fail() {
let (_srv, port, _) = rpc::tests::ws::serve();
let path = PathBuf::from("nonexist");
let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), &path);
let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), &path);
let _ = connect.map(|conn| {
assert!(matches!(&conn, &Err(RpcError::NoAuthCode)));
}).wait();
}
let _ = connect
.map(|conn| {
assert!(matches!(&conn, &Err(RpcError::NoAuthCode)));
})
.wait();
}
#[test]
fn test_authcode_correct() {
let (_srv, port, mut authcodes) = rpc::tests::ws::serve();
#[test]
fn test_authcode_correct() {
let (_srv, port, mut authcodes) = rpc::tests::ws::serve();
let _ = authcodes.generate_new();
authcodes.to_file(&authcodes.path).unwrap();
let _ = authcodes.generate_new();
authcodes.to_file(&authcodes.path).unwrap();
let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port),
&authcodes.path);
let _ = connect.map(|conn| {
assert!(conn.is_ok())
}).wait();
}
let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), &authcodes.path);
let _ = connect.map(|conn| assert!(conn.is_ok())).wait();
}
}

View File

@@ -16,48 +16,61 @@
use client::{Rpc, RpcError};
use ethereum_types::U256;
use rpc::signer::{ConfirmationRequest, TransactionModification, TransactionCondition};
use futures::Canceled;
use rpc::signer::{ConfirmationRequest, TransactionCondition, TransactionModification};
use serde;
use serde_json::{Value as JsonValue, to_value};
use serde_json::{to_value, Value as JsonValue};
use std::path::PathBuf;
use futures::{Canceled};
use {BoxFuture};
use BoxFuture;
pub struct SignerRpc {
rpc: Rpc,
rpc: Rpc,
}
impl SignerRpc {
pub fn new(url: &str, authfile: &PathBuf) -> Result<Self, RpcError> {
Ok(SignerRpc { rpc: Rpc::new(&url, authfile)? })
}
pub fn new(url: &str, authfile: &PathBuf) -> Result<Self, RpcError> {
Ok(SignerRpc {
rpc: Rpc::new(&url, authfile)?,
})
}
pub fn requests_to_confirm(&mut self) -> BoxFuture<Result<Vec<ConfirmationRequest>, RpcError>, Canceled> {
self.rpc.request("signer_requestsToConfirm", vec![])
}
pub fn requests_to_confirm(
&mut self,
) -> BoxFuture<Result<Vec<ConfirmationRequest>, RpcError>, Canceled> {
self.rpc.request("signer_requestsToConfirm", vec![])
}
pub fn confirm_request(
&mut self,
id: U256,
new_gas: Option<U256>,
new_gas_price: Option<U256>,
new_condition: Option<Option<TransactionCondition>>,
pwd: &str
) -> BoxFuture<Result<U256, RpcError>, Canceled> {
self.rpc.request("signer_confirmRequest", vec![
Self::to_value(&format!("{:#x}", id)),
Self::to_value(&TransactionModification { sender: None, gas_price: new_gas_price, gas: new_gas, condition: new_condition }),
Self::to_value(&pwd),
])
}
pub fn confirm_request(
&mut self,
id: U256,
new_gas: Option<U256>,
new_gas_price: Option<U256>,
new_condition: Option<Option<TransactionCondition>>,
pwd: &str,
) -> BoxFuture<Result<U256, RpcError>, Canceled> {
self.rpc.request(
"signer_confirmRequest",
vec![
Self::to_value(&format!("{:#x}", id)),
Self::to_value(&TransactionModification {
sender: None,
gas_price: new_gas_price,
gas: new_gas,
condition: new_condition,
}),
Self::to_value(&pwd),
],
)
}
pub fn reject_request(&mut self, id: U256) -> BoxFuture<Result<bool, RpcError>, Canceled> {
self.rpc.request("signer_rejectRequest", vec![
JsonValue::String(format!("{:#x}", id))
])
}
pub fn reject_request(&mut self, id: U256) -> BoxFuture<Result<bool, RpcError>, Canceled> {
self.rpc.request(
"signer_rejectRequest",
vec![JsonValue::String(format!("{:#x}", id))],
)
}
fn to_value<T: serde::Serialize>(v: &T) -> JsonValue {
to_value(v).expect("Our types are always serializable; qed")
}
fn to_value<T: serde::Serialize>(v: &T) -> JsonValue {
to_value(v).expect("Our types are always serializable; qed")
}
}

View File

@@ -21,177 +21,142 @@ extern crate rpassword;
extern crate parity_rpc as rpc;
extern crate parity_rpc_client as client;
use client::signer_client::SignerRpc;
use ethereum_types::U256;
use rpc::signer::ConfirmationRequest;
use client::signer_client::SignerRpc;
use std::io::{Write, BufRead, BufReader, stdout, stdin};
use std::path::PathBuf;
use std::fs::File;
use std::{
fs::File,
io::{stdin, stdout, BufRead, BufReader, Write},
path::PathBuf,
};
use futures::Future;
fn sign_interactive(
signer: &mut SignerRpc,
password: &str,
request: ConfirmationRequest
) {
print!("\n{}\nSign this transaction? (y)es/(N)o/(r)eject: ", request);
let _ = stdout().flush();
match BufReader::new(stdin()).lines().next() {
Some(Ok(line)) => {
match line.to_lowercase().chars().nth(0) {
Some('y') => {
match sign_transaction(signer, request.id, password) {
Ok(s) | Err(s) => println!("{}", s),
}
}
Some('r') => {
match reject_transaction(signer, request.id) {
Ok(s) | Err(s) => println!("{}", s),
}
}
_ => ()
}
}
_ => println!("Could not read from stdin")
}
fn sign_interactive(signer: &mut SignerRpc, password: &str, request: ConfirmationRequest) {
print!(
"\n{}\nSign this transaction? (y)es/(N)o/(r)eject: ",
request
);
let _ = stdout().flush();
match BufReader::new(stdin()).lines().next() {
Some(Ok(line)) => match line.to_lowercase().chars().nth(0) {
Some('y') => match sign_transaction(signer, request.id, password) {
Ok(s) | Err(s) => println!("{}", s),
},
Some('r') => match reject_transaction(signer, request.id) {
Ok(s) | Err(s) => println!("{}", s),
},
_ => (),
},
_ => println!("Could not read from stdin"),
}
}
fn sign_transactions(
signer: &mut SignerRpc,
password: String
) -> Result<String, String> {
signer.requests_to_confirm().map(|reqs| {
match reqs {
Ok(ref reqs) if reqs.is_empty() => {
Ok("No transactions in signing queue".to_owned())
}
Ok(reqs) => {
for r in reqs {
sign_interactive(signer, &password, r)
}
Ok("".to_owned())
}
Err(err) => {
Err(format!("error: {:?}", err))
}
}
}).map_err(|err| {
format!("{:?}", err)
}).wait()?
fn sign_transactions(signer: &mut SignerRpc, password: String) -> Result<String, String> {
signer
.requests_to_confirm()
.map(|reqs| match reqs {
Ok(ref reqs) if reqs.is_empty() => Ok("No transactions in signing queue".to_owned()),
Ok(reqs) => {
for r in reqs {
sign_interactive(signer, &password, r)
}
Ok("".to_owned())
}
Err(err) => Err(format!("error: {:?}", err)),
})
.map_err(|err| format!("{:?}", err))
.wait()?
}
fn list_transactions(signer: &mut SignerRpc) -> Result<String, String> {
signer.requests_to_confirm().map(|reqs| {
match reqs {
Ok(ref reqs) if reqs.is_empty() => {
Ok("No transactions in signing queue".to_owned())
}
Ok(ref reqs) => {
Ok(format!("Transaction queue:\n{}", reqs
.iter()
.map(|r| format!("{}", r))
.collect::<Vec<String>>()
.join("\n")))
}
Err(err) => {
Err(format!("error: {:?}", err))
}
}
}).map_err(|err| {
format!("{:?}", err)
}).wait()?
signer
.requests_to_confirm()
.map(|reqs| match reqs {
Ok(ref reqs) if reqs.is_empty() => Ok("No transactions in signing queue".to_owned()),
Ok(ref reqs) => Ok(format!(
"Transaction queue:\n{}",
reqs.iter()
.map(|r| format!("{}", r))
.collect::<Vec<String>>()
.join("\n")
)),
Err(err) => Err(format!("error: {:?}", err)),
})
.map_err(|err| format!("{:?}", err))
.wait()?
}
fn sign_transaction(
signer: &mut SignerRpc, id: U256, password: &str
) -> Result<String, String> {
signer.confirm_request(id, None, None, None, password).map(|res| {
match res {
Ok(u) => Ok(format!("Signed transaction id: {:#x}", u)),
Err(e) => Err(format!("{:?}", e)),
}
}).map_err(|err| {
format!("{:?}", err)
}).wait()?
fn sign_transaction(signer: &mut SignerRpc, id: U256, password: &str) -> Result<String, String> {
signer
.confirm_request(id, None, None, None, password)
.map(|res| match res {
Ok(u) => Ok(format!("Signed transaction id: {:#x}", u)),
Err(e) => Err(format!("{:?}", e)),
})
.map_err(|err| format!("{:?}", err))
.wait()?
}
fn reject_transaction(
signer: &mut SignerRpc, id: U256) -> Result<String, String>
{
signer.reject_request(id).map(|res| {
match res {
Ok(true) => Ok(format!("Rejected transaction id {:#x}", id)),
Ok(false) => Err(format!("No such request")),
Err(e) => Err(format!("{:?}", e)),
}
}).map_err(|err| {
format!("{:?}", err)
}).wait()?
fn reject_transaction(signer: &mut SignerRpc, id: U256) -> Result<String, String> {
signer
.reject_request(id)
.map(|res| match res {
Ok(true) => Ok(format!("Rejected transaction id {:#x}", id)),
Ok(false) => Err(format!("No such request")),
Err(e) => Err(format!("{:?}", e)),
})
.map_err(|err| format!("{:?}", err))
.wait()?
}
// cmds
pub fn signer_list(
signerport: u16, authfile: PathBuf
) -> Result<String, String> {
let addr = &format!("ws://127.0.0.1:{}", signerport);
let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| {
format!("{:?}", err)
})?;
list_transactions(&mut signer)
pub fn signer_list(signerport: u16, authfile: PathBuf) -> Result<String, String> {
let addr = &format!("ws://127.0.0.1:{}", signerport);
let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| format!("{:?}", err))?;
list_transactions(&mut signer)
}
pub fn signer_reject(
id: Option<usize>, signerport: u16, authfile: PathBuf
id: Option<usize>,
signerport: u16,
authfile: PathBuf,
) -> Result<String, String> {
let id = id.ok_or(format!("id required for signer reject"))?;
let addr = &format!("ws://127.0.0.1:{}", signerport);
let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| {
format!("{:?}", err)
})?;
reject_transaction(&mut signer, U256::from(id))
let id = id.ok_or(format!("id required for signer reject"))?;
let addr = &format!("ws://127.0.0.1:{}", signerport);
let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| format!("{:?}", err))?;
reject_transaction(&mut signer, U256::from(id))
}
pub fn signer_sign(
id: Option<usize>,
pwfile: Option<PathBuf>,
signerport: u16,
authfile: PathBuf
id: Option<usize>,
pwfile: Option<PathBuf>,
signerport: u16,
authfile: PathBuf,
) -> Result<String, String> {
let password;
match pwfile {
Some(pwfile) => {
match File::open(pwfile) {
Ok(fd) => {
match BufReader::new(fd).lines().next() {
Some(Ok(line)) => password = line,
_ => return Err(format!("No password in file"))
}
},
Err(e) =>
return Err(format!("Could not open password file: {}", e))
}
}
None => {
password = match rpassword::prompt_password_stdout("Password: ") {
Ok(p) => p,
Err(e) => return Err(format!("{}", e)),
}
}
}
let password;
match pwfile {
Some(pwfile) => match File::open(pwfile) {
Ok(fd) => match BufReader::new(fd).lines().next() {
Some(Ok(line)) => password = line,
_ => return Err(format!("No password in file")),
},
Err(e) => return Err(format!("Could not open password file: {}", e)),
},
None => {
password = match rpassword::prompt_password_stdout("Password: ") {
Ok(p) => p,
Err(e) => return Err(format!("{}", e)),
}
}
}
let addr = &format!("ws://127.0.0.1:{}", signerport);
let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| {
format!("{:?}", err)
})?;
let addr = &format!("ws://127.0.0.1:{}", signerport);
let mut signer = SignerRpc::new(addr, &authfile).map_err(|err| format!("{:?}", err))?;
match id {
Some(id) => {
sign_transaction(&mut signer, U256::from(id), &password)
},
None => {
sign_transactions(&mut signer, password)
}
}
match id {
Some(id) => sign_transaction(&mut signer, U256::from(id), &password),
None => sign_transactions(&mut signer, password),
}
}

View File

@@ -21,93 +21,106 @@ extern crate ethash;
use criterion::Criterion;
use ethash::{NodeCacheBuilder, OptimizeFor};
const HASH: [u8; 32] = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe,
0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f,
0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
const HASH: [u8; 32] = [
0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a,
0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72,
];
const NONCE: u64 = 0xd7b3ac70a301a249;
criterion_group!(
basic,
bench_light_compute_memmap,
bench_light_compute_memory,
bench_light_new_round_trip_memmap,
bench_light_new_round_trip_memory,
bench_light_from_file_round_trip_memory,
bench_light_from_file_round_trip_memmap
basic,
bench_light_compute_memmap,
bench_light_compute_memory,
bench_light_new_round_trip_memmap,
bench_light_new_round_trip_memory,
bench_light_from_file_round_trip_memory,
bench_light_from_file_round_trip_memmap
);
criterion_main!(basic);
fn bench_light_compute_memmap(b: &mut Criterion) {
use std::env;
use std::env;
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| light.compute(&HASH, NONCE, u64::max_value())));
b.bench_function("bench_light_compute_memmap", move |b| {
b.iter(|| light.compute(&HASH, NONCE, u64::max_value()))
});
}
fn bench_light_compute_memory(b: &mut Criterion) {
use std::env;
use std::env;
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| light.compute(&HASH, NONCE, u64::max_value())));
b.bench_function("bench_light_compute_memmap", move |b| {
b.iter(|| light.compute(&HASH, NONCE, u64::max_value()))
});
}
fn bench_light_new_round_trip_memmap(b: &mut Criterion) {
use std::env;
use std::env;
b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
light.compute(&HASH, NONCE, u64::max_value());
}));
b.bench_function("bench_light_compute_memmap", move |b| {
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
light.compute(&HASH, NONCE, u64::max_value());
})
});
}
fn bench_light_new_round_trip_memory(b: &mut Criterion) {
use std::env;
use std::env;
b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
light.compute(&HASH, NONCE, u64::max_value());
}));
b.bench_function("bench_light_compute_memmap", move |b| {
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let light = builder.light(&env::temp_dir(), 486382);
light.compute(&HASH, NONCE, u64::max_value());
})
});
}
fn bench_light_from_file_round_trip_memory(b: &mut Criterion) {
use std::env;
use std::env;
let dir = env::temp_dir();
let height = 486382;
{
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let mut dummy = builder.light(&dir, height);
dummy.to_file().unwrap();
}
let dir = env::temp_dir();
let height = 486382;
{
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let mut dummy = builder.light(&dir, height);
dummy.to_file().unwrap();
}
b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let light = builder.light_from_file(&dir, 486382).unwrap();
light.compute(&HASH, NONCE, u64::max_value());
}));
b.bench_function("bench_light_compute_memmap", move |b| {
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Cpu, u64::max_value());
let light = builder.light_from_file(&dir, 486382).unwrap();
light.compute(&HASH, NONCE, u64::max_value());
})
});
}
fn bench_light_from_file_round_trip_memmap(b: &mut Criterion) {
use std::env;
use std::env;
let dir = env::temp_dir();
let height = 486382;
let dir = env::temp_dir();
let height = 486382;
{
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let mut dummy = builder.light(&dir, height);
dummy.to_file().unwrap();
}
{
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let mut dummy = builder.light(&dir, height);
dummy.to_file().unwrap();
}
b.bench_function("bench_light_compute_memmap", move |b| b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let light = builder.light_from_file(&dir, 486382).unwrap();
light.compute(&HASH, NONCE, u64::max_value());
}));
b.bench_function("bench_light_compute_memmap", move |b| {
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let light = builder.light_from_file(&dir, 486382).unwrap();
light.compute(&HASH, NONCE, u64::max_value());
})
});
}

View File

@@ -7,80 +7,71 @@ extern crate tempdir;
use criterion::Criterion;
use ethash::progpow;
use tempdir::TempDir;
use ethash::{compute::light_compute, NodeCacheBuilder, OptimizeFor};
use rustc_hex::FromHex;
use ethash::{NodeCacheBuilder, OptimizeFor};
use ethash::compute::light_compute;
use tempdir::TempDir;
fn bench_hashimoto_light(c: &mut Criterion) {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let light = builder.light(&tempdir.path(), 1);
let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f").unwrap();
let mut hash = [0; 32];
hash.copy_from_slice(&h);
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let light = builder.light(&tempdir.path(), 1);
let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
.unwrap();
let mut hash = [0; 32];
hash.copy_from_slice(&h);
c.bench_function("hashimoto_light", move |b| {
b.iter(|| light_compute(&light, &hash, 0))
});
c.bench_function("hashimoto_light", move |b| {
b.iter(|| light_compute(&light, &hash, 0))
});
}
fn bench_progpow_light(c: &mut Criterion) {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f").unwrap();
let mut hash = [0; 32];
hash.copy_from_slice(&h);
let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
.unwrap();
let mut hash = [0; 32];
hash.copy_from_slice(&h);
c.bench_function("progpow_light", move |b| {
b.iter(|| {
let c_dag = progpow::generate_cdag(cache.as_ref());
progpow::progpow(
hash,
0,
0,
cache.as_ref(),
&c_dag,
);
})
});
c.bench_function("progpow_light", move |b| {
b.iter(|| {
let c_dag = progpow::generate_cdag(cache.as_ref());
progpow::progpow(hash, 0, 0, cache.as_ref(), &c_dag);
})
});
}
fn bench_progpow_optimal_light(c: &mut Criterion) {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
let c_dag = progpow::generate_cdag(cache.as_ref());
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
let c_dag = progpow::generate_cdag(cache.as_ref());
let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f").unwrap();
let mut hash = [0; 32];
hash.copy_from_slice(&h);
let h = FromHex::from_hex("c9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
.unwrap();
let mut hash = [0; 32];
hash.copy_from_slice(&h);
c.bench_function("progpow_optimal_light", move |b| {
b.iter(|| {
progpow::progpow(
hash,
0,
0,
cache.as_ref(),
&c_dag,
);
})
});
c.bench_function("progpow_optimal_light", move |b| {
b.iter(|| {
progpow::progpow(hash, 0, 0, cache.as_ref(), &c_dag);
})
});
}
fn bench_keccak_f800_long(c: &mut Criterion) {
c.bench_function("keccak_f800_long(0, 0, 0)", |b| {
b.iter(|| progpow::keccak_f800_long([0; 32], 0, [0; 8]))
});
c.bench_function("keccak_f800_long(0, 0, 0)", |b| {
b.iter(|| progpow::keccak_f800_long([0; 32], 0, [0; 8]))
});
}
criterion_group!(benches,
bench_hashimoto_light,
bench_progpow_light,
bench_progpow_optimal_light,
bench_keccak_f800_long,
criterion_group!(
benches,
bench_hashimoto_light,
bench_progpow_light,
bench_progpow_optimal_light,
bench_keccak_f800_long,
);
criterion_main!(benches);

View File

@@ -16,291 +16,298 @@
use compute::Light;
use either::Either;
use keccak::{H256, keccak_512};
use keccak::{keccak_512, H256};
use memmap::MmapMut;
use parking_lot::Mutex;
use seed_compute::SeedHashCompute;
use shared::{ETHASH_CACHE_ROUNDS, NODE_BYTES, NODE_DWORDS, Node, epoch, get_cache_size, to_hex};
use shared::{epoch, get_cache_size, to_hex, Node, ETHASH_CACHE_ROUNDS, NODE_BYTES, NODE_DWORDS};
use std::borrow::Cow;
use std::fs;
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use std::slice;
use std::sync::Arc;
use std::{
borrow::Cow,
fs,
io::{self, Read, Write},
path::{Path, PathBuf},
slice,
sync::Arc,
};
type Cache = Either<Vec<Node>, MmapMut>;
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum OptimizeFor {
Cpu,
Memory,
Cpu,
Memory,
}
impl Default for OptimizeFor {
fn default() -> Self {
OptimizeFor::Cpu
}
fn default() -> Self {
OptimizeFor::Cpu
}
}
fn byte_size(cache: &Cache) -> usize {
use self::Either::{Left, Right};
use self::Either::{Left, Right};
match *cache {
Left(ref vec) => vec.len() * NODE_BYTES,
Right(ref mmap) => mmap.len(),
}
match *cache {
Left(ref vec) => vec.len() * NODE_BYTES,
Right(ref mmap) => mmap.len(),
}
}
fn new_buffer(path: &Path, num_nodes: usize, ident: &H256, optimize_for: OptimizeFor) -> Cache {
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => make_memmapped_cache(path, num_nodes, ident).ok(),
};
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => make_memmapped_cache(path, num_nodes, ident).ok(),
};
memmap.map(Either::Right).unwrap_or_else(|| {
Either::Left(make_memory_cache(num_nodes, ident))
})
memmap
.map(Either::Right)
.unwrap_or_else(|| Either::Left(make_memory_cache(num_nodes, ident)))
}
#[derive(Clone)]
pub struct NodeCacheBuilder {
// TODO: Remove this locking and just use an `Rc`?
seedhash: Arc<Mutex<SeedHashCompute>>,
optimize_for: OptimizeFor,
progpow_transition: u64,
// TODO: Remove this locking and just use an `Rc`?
seedhash: Arc<Mutex<SeedHashCompute>>,
optimize_for: OptimizeFor,
progpow_transition: u64,
}
// TODO: Abstract the "optimize for" logic
pub struct NodeCache {
builder: NodeCacheBuilder,
cache_dir: Cow<'static, Path>,
cache_path: PathBuf,
epoch: u64,
cache: Cache,
builder: NodeCacheBuilder,
cache_dir: Cow<'static, Path>,
cache_path: PathBuf,
epoch: u64,
cache: Cache,
}
impl NodeCacheBuilder {
pub fn light(&self, cache_dir: &Path, block_number: u64) -> Light {
Light::new_with_builder(self, cache_dir, block_number, self.progpow_transition)
}
pub fn light(&self, cache_dir: &Path, block_number: u64) -> Light {
Light::new_with_builder(self, cache_dir, block_number, self.progpow_transition)
}
pub fn light_from_file(&self, cache_dir: &Path, block_number: u64) -> io::Result<Light> {
Light::from_file_with_builder(self, cache_dir, block_number, self.progpow_transition)
}
pub fn light_from_file(&self, cache_dir: &Path, block_number: u64) -> io::Result<Light> {
Light::from_file_with_builder(self, cache_dir, block_number, self.progpow_transition)
}
pub fn new<T: Into<Option<OptimizeFor>>>(optimize_for: T, progpow_transition: u64) -> Self {
NodeCacheBuilder {
seedhash: Arc::new(Mutex::new(SeedHashCompute::default())),
optimize_for: optimize_for.into().unwrap_or_default(),
progpow_transition
}
}
pub fn new<T: Into<Option<OptimizeFor>>>(optimize_for: T, progpow_transition: u64) -> Self {
NodeCacheBuilder {
seedhash: Arc::new(Mutex::new(SeedHashCompute::default())),
optimize_for: optimize_for.into().unwrap_or_default(),
progpow_transition,
}
}
fn block_number_to_ident(&self, block_number: u64) -> H256 {
self.seedhash.lock().hash_block_number(block_number)
}
fn block_number_to_ident(&self, block_number: u64) -> H256 {
self.seedhash.lock().hash_block_number(block_number)
}
fn epoch_to_ident(&self, epoch: u64) -> H256 {
self.seedhash.lock().hash_epoch(epoch)
}
fn epoch_to_ident(&self, epoch: u64) -> H256 {
self.seedhash.lock().hash_epoch(epoch)
}
pub fn from_file<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> io::Result<NodeCache> {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
pub fn from_file<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> io::Result<NodeCache> {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
let path = cache_path(cache_dir.as_ref(), &ident);
let path = cache_path(cache_dir.as_ref(), &ident);
let cache = cache_from_path(&path, self.optimize_for)?;
let expected_cache_size = get_cache_size(block_number);
let cache = cache_from_path(&path, self.optimize_for)?;
let expected_cache_size = get_cache_size(block_number);
if byte_size(&cache) == expected_cache_size {
Ok(NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir,
cache_path: path,
cache: cache,
})
} else {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"Node cache is of incorrect size",
))
}
}
if byte_size(&cache) == expected_cache_size {
Ok(NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir,
cache_path: path,
cache: cache,
})
} else {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"Node cache is of incorrect size",
))
}
}
pub fn new_cache<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> NodeCache {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
pub fn new_cache<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> NodeCache {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
let cache_size = get_cache_size(block_number);
let cache_size = get_cache_size(block_number);
// We use `debug_assert` since it is impossible for `get_cache_size` to return an unaligned
// value with the current implementation. If the implementation changes, CI will catch it.
debug_assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size");
let num_nodes = cache_size / NODE_BYTES;
// We use `debug_assert` since it is impossible for `get_cache_size` to return an unaligned
// value with the current implementation. If the implementation changes, CI will catch it.
debug_assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size");
let num_nodes = cache_size / NODE_BYTES;
let path = cache_path(cache_dir.as_ref(), &ident);
let nodes = new_buffer(&path, num_nodes, &ident, self.optimize_for);
let path = cache_path(cache_dir.as_ref(), &ident);
let nodes = new_buffer(&path, num_nodes, &ident, self.optimize_for);
NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir.into(),
cache_path: path,
cache: nodes,
}
}
NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir.into(),
cache_path: path,
cache: nodes,
}
}
}
impl NodeCache {
pub fn cache_path(&self) -> &Path {
&self.cache_path
}
pub fn cache_path(&self) -> &Path {
&self.cache_path
}
pub fn flush(&mut self) -> io::Result<()> {
if let Some(last) = self.epoch.checked_sub(2).map(|ep| {
cache_path(self.cache_dir.as_ref(), &self.builder.epoch_to_ident(ep))
})
{
fs::remove_file(last).unwrap_or_else(|error| match error.kind() {
io::ErrorKind::NotFound => (),
_ => warn!("Error removing stale DAG cache: {:?}", error),
});
}
pub fn flush(&mut self) -> io::Result<()> {
if let Some(last) = self
.epoch
.checked_sub(2)
.map(|ep| cache_path(self.cache_dir.as_ref(), &self.builder.epoch_to_ident(ep)))
{
fs::remove_file(last).unwrap_or_else(|error| match error.kind() {
io::ErrorKind::NotFound => (),
_ => warn!("Error removing stale DAG cache: {:?}", error),
});
}
consume_cache(&mut self.cache, &self.cache_path)
}
consume_cache(&mut self.cache, &self.cache_path)
}
}
fn make_memmapped_cache(path: &Path, num_nodes: usize, ident: &H256) -> io::Result<MmapMut> {
use std::fs::OpenOptions;
use std::fs::OpenOptions;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len((num_nodes * NODE_BYTES) as _)?;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len((num_nodes * NODE_BYTES) as _)?;
let mut memmap = unsafe { MmapMut::map_mut(&file)? };
let mut memmap = unsafe { MmapMut::map_mut(&file)? };
unsafe { initialize_memory(memmap.as_mut_ptr() as *mut Node, num_nodes, ident) };
unsafe { initialize_memory(memmap.as_mut_ptr() as *mut Node, num_nodes, ident) };
Ok(memmap)
Ok(memmap)
}
fn make_memory_cache(num_nodes: usize, ident: &H256) -> Vec<Node> {
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
// Use uninit instead of unnecessarily writing `size_of::<Node>() * num_nodes` 0s
unsafe {
initialize_memory(nodes.as_mut_ptr(), num_nodes, ident);
nodes.set_len(num_nodes);
}
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
// Use uninit instead of unnecessarily writing `size_of::<Node>() * num_nodes` 0s
unsafe {
initialize_memory(nodes.as_mut_ptr(), num_nodes, ident);
nodes.set_len(num_nodes);
}
nodes
nodes
}
fn cache_path<'a, P: Into<Cow<'a, Path>>>(path: P, ident: &H256) -> PathBuf {
let mut buf = path.into().into_owned();
buf.push(to_hex(ident));
buf
let mut buf = path.into().into_owned();
buf.push(to_hex(ident));
buf
}
fn consume_cache(cache: &mut Cache, path: &Path) -> io::Result<()> {
use std::fs::OpenOptions;
use std::fs::OpenOptions;
match *cache {
Either::Left(ref mut vec) => {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
match *cache {
Either::Left(ref mut vec) => {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
let buf = unsafe {
slice::from_raw_parts_mut(vec.as_mut_ptr() as *mut u8, vec.len() * NODE_BYTES)
};
let buf = unsafe {
slice::from_raw_parts_mut(vec.as_mut_ptr() as *mut u8, vec.len() * NODE_BYTES)
};
file.write_all(buf).map(|_| ())
}
Either::Right(ref mmap) => {
mmap.flush()
}
}
file.write_all(buf).map(|_| ())
}
Either::Right(ref mmap) => mmap.flush(),
}
}
fn cache_from_path(path: &Path, optimize_for: OptimizeFor) -> io::Result<Cache> {
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => {
let file = fs::OpenOptions::new().read(true).write(true).create(true).open(path)?;
unsafe { MmapMut::map_mut(&file).ok() }
},
};
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => {
let file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(path)?;
unsafe { MmapMut::map_mut(&file).ok() }
}
};
memmap.map(Either::Right).ok_or(()).or_else(|_| {
read_from_path(path).map(Either::Left)
})
memmap
.map(Either::Right)
.ok_or(())
.or_else(|_| read_from_path(path).map(Either::Left))
}
fn read_from_path(path: &Path) -> io::Result<Vec<Node>> {
use std::fs::File;
use std::mem;
use std::{fs::File, mem};
let mut file = File::open(path)?;
let mut file = File::open(path)?;
let mut nodes: Vec<u8> = Vec::with_capacity(file.metadata().map(|m| m.len() as _).unwrap_or(
NODE_BYTES * 1_000_000,
));
file.read_to_end(&mut nodes)?;
let mut nodes: Vec<u8> = Vec::with_capacity(
file.metadata()
.map(|m| m.len() as _)
.unwrap_or(NODE_BYTES * 1_000_000),
);
file.read_to_end(&mut nodes)?;
nodes.shrink_to_fit();
nodes.shrink_to_fit();
if nodes.len() % NODE_BYTES != 0 || nodes.capacity() % NODE_BYTES != 0 {
return Err(io::Error::new(
io::ErrorKind::Other,
"Node cache is not a multiple of node size",
));
}
if nodes.len() % NODE_BYTES != 0 || nodes.capacity() % NODE_BYTES != 0 {
return Err(io::Error::new(
io::ErrorKind::Other,
"Node cache is not a multiple of node size",
));
}
let out: Vec<Node> = unsafe {
Vec::from_raw_parts(
nodes.as_mut_ptr() as *mut _,
nodes.len() / NODE_BYTES,
nodes.capacity() / NODE_BYTES,
)
};
let out: Vec<Node> = unsafe {
Vec::from_raw_parts(
nodes.as_mut_ptr() as *mut _,
nodes.len() / NODE_BYTES,
nodes.capacity() / NODE_BYTES,
)
};
mem::forget(nodes);
mem::forget(nodes);
Ok(out)
Ok(out)
}
impl AsRef<[Node]> for NodeCache {
fn as_ref(&self) -> &[Node] {
match self.cache {
Either::Left(ref vec) => vec,
Either::Right(ref mmap) => unsafe {
let bytes = mmap.as_ptr();
// This isn't a safety issue, so we can keep this a debug lint. We don't care about
// people manually messing with the files unless it can cause unsafety, but if we're
// generating incorrect files then we want to catch that in CI.
debug_assert_eq!(mmap.len() % NODE_BYTES, 0);
slice::from_raw_parts(bytes as _, mmap.len() / NODE_BYTES)
},
}
}
fn as_ref(&self) -> &[Node] {
match self.cache {
Either::Left(ref vec) => vec,
Either::Right(ref mmap) => unsafe {
let bytes = mmap.as_ptr();
// This isn't a safety issue, so we can keep this a debug lint. We don't care about
// people manually messing with the files unless it can cause unsafety, but if we're
// generating incorrect files then we want to catch that in CI.
debug_assert_eq!(mmap.len() % NODE_BYTES, 0);
slice::from_raw_parts(bytes as _, mmap.len() / NODE_BYTES)
},
}
}
}
// This takes a raw pointer and a counter because `memory` may be uninitialized. `memory` _must_ be
@@ -311,47 +318,47 @@ impl AsRef<[Node]> for NodeCache {
// out. It counts as a read and causes all writes afterwards to be elided. Yes, really. I know, I
// want to refactor this to use less `unsafe` as much as the next rustacean.
unsafe fn initialize_memory(memory: *mut Node, num_nodes: usize, ident: &H256) {
let dst = memory as *mut u8;
let dst = memory as *mut u8;
debug_assert_eq!(ident.len(), 32);
keccak_512::unchecked(dst, NODE_BYTES, ident.as_ptr(), ident.len());
debug_assert_eq!(ident.len(), 32);
keccak_512::unchecked(dst, NODE_BYTES, ident.as_ptr(), ident.len());
for i in 1..num_nodes {
// We use raw pointers here, see above
let dst = memory.offset(i as _) as *mut u8;
let src = memory.offset(i as isize - 1) as *mut u8;
for i in 1..num_nodes {
// We use raw pointers here, see above
let dst = memory.offset(i as _) as *mut u8;
let src = memory.offset(i as isize - 1) as *mut u8;
keccak_512::unchecked(dst, NODE_BYTES, src, NODE_BYTES);
}
keccak_512::unchecked(dst, NODE_BYTES, src, NODE_BYTES);
}
// Now this is initialized, we can treat it as a slice.
let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes);
// Now this is initialized, we can treat it as a slice.
let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes);
// For `unroll!`, see below. If the literal in `unroll!` is not the same as the RHS here then
// these have got out of sync! Don't let this happen!
debug_assert_eq!(NODE_DWORDS, 8);
// For `unroll!`, see below. If the literal in `unroll!` is not the same as the RHS here then
// these have got out of sync! Don't let this happen!
debug_assert_eq!(NODE_DWORDS, 8);
// This _should_ get unrolled by the compiler, since it's not using the loop variable.
for _ in 0..ETHASH_CACHE_ROUNDS {
for i in 0..num_nodes {
let data_idx = (num_nodes - 1 + i) % num_nodes;
let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes;
// This _should_ get unrolled by the compiler, since it's not using the loop variable.
for _ in 0..ETHASH_CACHE_ROUNDS {
for i in 0..num_nodes {
let data_idx = (num_nodes - 1 + i) % num_nodes;
let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes;
let data = {
let mut data: Node = nodes.get_unchecked(data_idx).clone();
let rhs: &Node = nodes.get_unchecked(idx);
let data = {
let mut data: Node = nodes.get_unchecked(data_idx).clone();
let rhs: &Node = nodes.get_unchecked(idx);
unroll! {
for w in 0..8 {
*data.as_dwords_mut().get_unchecked_mut(w) ^=
*rhs.as_dwords().get_unchecked(w);
}
}
unroll! {
for w in 0..8 {
*data.as_dwords_mut().get_unchecked_mut(w) ^=
*rhs.as_dwords().get_unchecked(w);
}
}
data
};
data
};
keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes);
}
}
keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes);
}
}
}


@@ -19,15 +19,14 @@
// TODO: fix endianness for big endian
use keccak::{keccak_512, keccak_256, H256};
use cache::{NodeCache, NodeCacheBuilder};
use progpow::{CDag, generate_cdag, progpow, keccak_f800_short, keccak_f800_long};
use keccak::{keccak_256, keccak_512, H256};
use progpow::{generate_cdag, keccak_f800_long, keccak_f800_short, progpow, CDag};
use seed_compute::SeedHashCompute;
use shared::*;
use std::io;
use std::{mem, ptr};
use std::path::Path;
use std::{mem, path::Path, ptr};
const MIX_WORDS: usize = ETHASH_MIX_BYTES / 4;
const MIX_NODES: usize = MIX_WORDS / NODE_WORDS;
@@ -35,92 +34,99 @@ pub const FNV_PRIME: u32 = 0x01000193;
/// Computation result
pub struct ProofOfWork {
/// Difficulty boundary
pub value: H256,
/// Mix
pub mix_hash: H256,
/// Difficulty boundary
pub value: H256,
/// Mix
pub mix_hash: H256,
}
enum Algorithm {
Hashimoto,
Progpow(Box<CDag>),
Hashimoto,
Progpow(Box<CDag>),
}
pub struct Light {
block_number: u64,
cache: NodeCache,
algorithm: Algorithm,
block_number: u64,
cache: NodeCache,
algorithm: Algorithm,
}
/// Light cache structure
impl Light {
pub fn new_with_builder(
builder: &NodeCacheBuilder,
cache_dir: &Path,
block_number: u64,
progpow_transition: u64,
) -> Self {
let cache = builder.new_cache(cache_dir.to_path_buf(), block_number);
pub fn new_with_builder(
builder: &NodeCacheBuilder,
cache_dir: &Path,
block_number: u64,
progpow_transition: u64,
) -> Self {
let cache = builder.new_cache(cache_dir.to_path_buf(), block_number);
let algorithm = if block_number >= progpow_transition {
Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref())))
} else {
Algorithm::Hashimoto
};
let algorithm = if block_number >= progpow_transition {
Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref())))
} else {
Algorithm::Hashimoto
};
Light { block_number, cache, algorithm }
}
Light {
block_number,
cache,
algorithm,
}
}
/// Calculate the light boundary data
/// `header_hash` - The header hash to pack into the mix
/// `nonce` - The nonce to pack into the mix
pub fn compute(&self, header_hash: &H256, nonce: u64, block_number: u64) -> ProofOfWork {
match self.algorithm {
Algorithm::Progpow(ref c_dag) => {
let (value, mix_hash) = progpow(
*header_hash,
nonce,
block_number,
self.cache.as_ref(),
c_dag,
);
/// Calculate the light boundary data
/// `header_hash` - The header hash to pack into the mix
/// `nonce` - The nonce to pack into the mix
pub fn compute(&self, header_hash: &H256, nonce: u64, block_number: u64) -> ProofOfWork {
match self.algorithm {
Algorithm::Progpow(ref c_dag) => {
let (value, mix_hash) = progpow(
*header_hash,
nonce,
block_number,
self.cache.as_ref(),
c_dag,
);
ProofOfWork { value, mix_hash }
},
Algorithm::Hashimoto => light_compute(self, header_hash, nonce),
}
ProofOfWork { value, mix_hash }
}
Algorithm::Hashimoto => light_compute(self, header_hash, nonce),
}
}
}
pub fn from_file_with_builder(
builder: &NodeCacheBuilder,
cache_dir: &Path,
block_number: u64,
progpow_transition: u64,
) -> io::Result<Self> {
let cache = builder.from_file(cache_dir.to_path_buf(), block_number)?;
pub fn from_file_with_builder(
builder: &NodeCacheBuilder,
cache_dir: &Path,
block_number: u64,
progpow_transition: u64,
) -> io::Result<Self> {
let cache = builder.from_file(cache_dir.to_path_buf(), block_number)?;
let algorithm = if block_number >= progpow_transition {
Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref())))
} else {
Algorithm::Hashimoto
};
let algorithm = if block_number >= progpow_transition {
Algorithm::Progpow(Box::new(generate_cdag(cache.as_ref())))
} else {
Algorithm::Hashimoto
};
Ok(Light {
block_number,
cache,
algorithm,
})
}
Ok(Light { block_number, cache, algorithm })
}
pub fn to_file(&mut self) -> io::Result<&Path> {
self.cache.flush()?;
Ok(self.cache.cache_path())
}
pub fn to_file(&mut self) -> io::Result<&Path> {
self.cache.flush()?;
Ok(self.cache.cache_path())
}
}
pub fn slow_hash_block_number(block_number: u64) -> H256 {
SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH)
SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH)
}
fn fnv_hash(x: u32, y: u32) -> u32 {
return x.wrapping_mul(FNV_PRIME) ^ y;
return x.wrapping_mul(FNV_PRIME) ^ y;
}
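// Aside: a minimal sketch of the two FNV variants used in this crate. `fnv_hash`
// above is the Ethash flavour of FNV (multiply first, then XOR), while the
// ProgPoW module later in this diff uses `fnv1a_hash`, which XORs first and then
// multiplies. Hypothetical free functions, reusing the `FNV_PRIME` constant
// defined above:
//
//     fn fnv(x: u32, y: u32) -> u32 { x.wrapping_mul(FNV_PRIME) ^ y }        // Ethash FNV
//     fn fnv1a(h: u32, d: u32) -> u32 { (h ^ d).wrapping_mul(FNV_PRIME) }    // FNV-1a (ProgPoW)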
/// Difficulty quick check for POW preverification
@@ -129,33 +135,38 @@ fn fnv_hash(x: u32, y: u32) -> u32 {
/// `nonce` The block's nonce
/// `mix_hash` The mix digest hash
/// Boundary recovered from mix hash
pub fn quick_get_difficulty(header_hash: &H256, nonce: u64, mix_hash: &H256, progpow: bool) -> H256 {
unsafe {
if progpow {
let seed = keccak_f800_short(*header_hash, nonce, [0u32; 8]);
keccak_f800_long(*header_hash, seed, mem::transmute(*mix_hash))
} else {
// This is safe - the `keccak_512` call below reads the first 40 bytes (which we explicitly set
// with two `copy_nonoverlapping` calls) but writes the first 64, and then we explicitly write
// the next 32 bytes before we read the whole thing with `keccak_256`.
//
// This cannot be elided by the compiler as it doesn't know the implementation of
// `keccak_512`.
let mut buf: [u8; 64 + 32] = mem::uninitialized();
pub fn quick_get_difficulty(
header_hash: &H256,
nonce: u64,
mix_hash: &H256,
progpow: bool,
) -> H256 {
unsafe {
if progpow {
let seed = keccak_f800_short(*header_hash, nonce, [0u32; 8]);
keccak_f800_long(*header_hash, seed, mem::transmute(*mix_hash))
} else {
// This is safe - the `keccak_512` call below reads the first 40 bytes (which we explicitly set
// with two `copy_nonoverlapping` calls) but writes the first 64, and then we explicitly write
// the next 32 bytes before we read the whole thing with `keccak_256`.
//
// This cannot be elided by the compiler as it doesn't know the implementation of
// `keccak_512`.
let mut buf: [u8; 64 + 32] = mem::uninitialized();
ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32);
ptr::copy_nonoverlapping(&nonce as *const u64 as *const u8, buf[32..].as_mut_ptr(), 8);
ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32);
ptr::copy_nonoverlapping(&nonce as *const u64 as *const u8, buf[32..].as_mut_ptr(), 8);
keccak_512::unchecked(buf.as_mut_ptr(), 64, buf.as_ptr(), 40);
ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32);
keccak_512::unchecked(buf.as_mut_ptr(), 64, buf.as_ptr(), 40);
ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32);
// This is initialized in `keccak_256`
let mut hash: [u8; 32] = mem::uninitialized();
keccak_256::unchecked(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len());
// This is initialized in `keccak_256`
let mut hash: [u8; 32] = mem::uninitialized();
keccak_256::unchecked(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len());
hash
}
}
hash
}
}
}
/// Calculate the light client data
@@ -163,289 +174,310 @@ pub fn quick_get_difficulty(header_hash: &H256, nonce: u64, mix_hash: &H256, pro
/// `header_hash` - The header hash to pack into the mix
/// `nonce` - The nonce to pack into the mix
pub fn light_compute(light: &Light, header_hash: &H256, nonce: u64) -> ProofOfWork {
let full_size = get_data_size(light.block_number);
hash_compute(light, full_size, header_hash, nonce)
let full_size = get_data_size(light.block_number);
hash_compute(light, full_size, header_hash, nonce)
}
fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64) -> ProofOfWork {
macro_rules! make_const_array {
($n:expr, $value:expr) => {{
// We use explicit lifetimes to ensure that `val` stays borrowed (and therefore
// unusable) until the transmuted value dies.
unsafe fn make_const_array<T, U>(val: &mut [T]) -> &mut [U; $n] {
use ::std::mem;
macro_rules! make_const_array {
($n:expr, $value:expr) => {{
// We use explicit lifetimes to ensure that `val` stays borrowed (and therefore
// unusable) until the transmuted value dies.
unsafe fn make_const_array<T, U>(val: &mut [T]) -> &mut [U; $n] {
use std::mem;
debug_assert_eq!(val.len() * mem::size_of::<T>(), $n * mem::size_of::<U>());
&mut *(val.as_mut_ptr() as *mut [U; $n])
}
debug_assert_eq!(val.len() * mem::size_of::<T>(), $n * mem::size_of::<U>());
&mut *(val.as_mut_ptr() as *mut [U; $n])
}
make_const_array($value)
}}
}
make_const_array($value)
}};
}
#[repr(C)]
struct MixBuf {
half_mix: Node,
compress_bytes: [u8; MIX_WORDS],
};
#[repr(C)]
struct MixBuf {
half_mix: Node,
compress_bytes: [u8; MIX_WORDS],
};
if full_size % MIX_WORDS != 0 {
panic!("Unaligned full size");
}
if full_size % MIX_WORDS != 0 {
panic!("Unaligned full size");
}
// You may be asking yourself: what in the name of Crypto Jesus is going on here? So: we need
// `half_mix` and `compress_bytes` in a single array later down in the code (we hash them
// together to create `value`) so that we can hash the full array. However, we do a bunch of
// reading and writing to these variables first. We originally allocated two arrays and then
// stuck them together with `ptr::copy_nonoverlapping` at the end, but this method is
// _significantly_ faster - by my benchmarks, a consistent 3-5%. This is the most ridiculous
// optimization I have ever done and I am so sorry. I can only chalk it up to cache locality
// improvements, since I can't imagine that 3-5% of our runtime is taken up by catting two
// arrays together.
let mut buf: MixBuf = MixBuf {
half_mix: unsafe {
// Pack `header_hash` and `nonce` together
// We explicitly write the first 40 bytes, leaving the last 24 as uninitialized. Then
// `keccak_512` reads the first 40 bytes (4th parameter) and overwrites the entire array,
// leaving it fully initialized.
let mut out: [u8; NODE_BYTES] = mem::uninitialized();
// You may be asking yourself: what in the name of Crypto Jesus is going on here? So: we need
// `half_mix` and `compress_bytes` in a single array later down in the code (we hash them
// together to create `value`) so that we can hash the full array. However, we do a bunch of
// reading and writing to these variables first. We originally allocated two arrays and then
// stuck them together with `ptr::copy_nonoverlapping` at the end, but this method is
// _significantly_ faster - by my benchmarks, a consistent 3-5%. This is the most ridiculous
// optimization I have ever done and I am so sorry. I can only chalk it up to cache locality
// improvements, since I can't imagine that 3-5% of our runtime is taken up by catting two
// arrays together.
let mut buf: MixBuf = MixBuf {
half_mix: unsafe {
// Pack `header_hash` and `nonce` together
// We explicitly write the first 40 bytes, leaving the last 24 as uninitialized. Then
// `keccak_512` reads the first 40 bytes (4th parameter) and overwrites the entire array,
// leaving it fully initialized.
let mut out: [u8; NODE_BYTES] = mem::uninitialized();
ptr::copy_nonoverlapping(header_hash.as_ptr(), out.as_mut_ptr(), header_hash.len());
ptr::copy_nonoverlapping(
&nonce as *const u64 as *const u8,
out[header_hash.len()..].as_mut_ptr(),
mem::size_of::<u64>(),
);
ptr::copy_nonoverlapping(header_hash.as_ptr(), out.as_mut_ptr(), header_hash.len());
ptr::copy_nonoverlapping(
&nonce as *const u64 as *const u8,
out[header_hash.len()..].as_mut_ptr(),
mem::size_of::<u64>(),
);
// compute keccak-512 hash and replicate across mix
keccak_512::unchecked(
out.as_mut_ptr(),
NODE_BYTES,
out.as_ptr(),
header_hash.len() + mem::size_of::<u64>(),
);
// compute keccak-512 hash and replicate across mix
keccak_512::unchecked(
out.as_mut_ptr(),
NODE_BYTES,
out.as_ptr(),
header_hash.len() + mem::size_of::<u64>(),
);
Node { bytes: out }
},
// This is fully initialized before being read, see `let mut compress = ...` below
compress_bytes: unsafe { mem::uninitialized() },
};
Node { bytes: out }
},
// This is fully initialized before being read, see `let mut compress = ...` below
compress_bytes: unsafe { mem::uninitialized() },
};
let mut mix: [_; MIX_NODES] = [buf.half_mix.clone(), buf.half_mix.clone()];
let mut mix: [_; MIX_NODES] = [buf.half_mix.clone(), buf.half_mix.clone()];
let page_size = 4 * MIX_WORDS;
let num_full_pages = (full_size / page_size) as u32;
// deref once for better performance
let cache: &[Node] = light.cache.as_ref();
let first_val = buf.half_mix.as_words()[0];
let page_size = 4 * MIX_WORDS;
let num_full_pages = (full_size / page_size) as u32;
// deref once for better performance
let cache: &[Node] = light.cache.as_ref();
let first_val = buf.half_mix.as_words()[0];
debug_assert_eq!(MIX_NODES, 2);
debug_assert_eq!(NODE_WORDS, 16);
debug_assert_eq!(MIX_NODES, 2);
debug_assert_eq!(NODE_WORDS, 16);
for i in 0..ETHASH_ACCESSES as u32 {
let index = {
// This is trivially safe, but does not work on big-endian. The safety of this is
// asserted in debug builds (see the definition of `make_const_array!`).
let mix_words: &mut [u32; MIX_WORDS] =
unsafe { make_const_array!(MIX_WORDS, &mut mix) };
for i in 0..ETHASH_ACCESSES as u32 {
let index = {
// This is trivially safe, but does not work on big-endian. The safety of this is
// asserted in debug builds (see the definition of `make_const_array!`).
let mix_words: &mut [u32; MIX_WORDS] =
unsafe { make_const_array!(MIX_WORDS, &mut mix) };
fnv_hash(first_val ^ i, mix_words[i as usize % MIX_WORDS]) % num_full_pages
};
fnv_hash(first_val ^ i, mix_words[i as usize % MIX_WORDS]) % num_full_pages
};
unroll! {
// MIX_NODES
for n in 0..2 {
let tmp_node = calculate_dag_item(
index * MIX_NODES as u32 + n as u32,
cache,
);
unroll! {
// MIX_NODES
for n in 0..2 {
let tmp_node = calculate_dag_item(
index * MIX_NODES as u32 + n as u32,
cache,
);
unroll! {
// NODE_WORDS
for w in 0..16 {
mix[n].as_words_mut()[w] =
fnv_hash(
mix[n].as_words()[w],
tmp_node.as_words()[w],
);
}
}
}
}
}
unroll! {
// NODE_WORDS
for w in 0..16 {
mix[n].as_words_mut()[w] =
fnv_hash(
mix[n].as_words()[w],
tmp_node.as_words()[w],
);
}
}
}
}
}
let mix_words: [u32; MIX_WORDS] = unsafe { mem::transmute(mix) };
let mix_words: [u32; MIX_WORDS] = unsafe { mem::transmute(mix) };
{
// This is an uninitialized buffer to begin with, but we iterate precisely `compress.len()`
// times and set each index, leaving the array fully initialized. THIS ONLY WORKS ON LITTLE-
// ENDIAN MACHINES. See a future PR to make this and the rest of the code work correctly on
// big-endian arches like mips.
let compress: &mut [u32; MIX_WORDS / 4] =
unsafe { make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes) };
{
// This is an uninitialized buffer to begin with, but we iterate precisely `compress.len()`
// times and set each index, leaving the array fully initialized. THIS ONLY WORKS ON LITTLE-
// ENDIAN MACHINES. See a future PR to make this and the rest of the code work correctly on
// big-endian arches like mips.
let compress: &mut [u32; MIX_WORDS / 4] =
unsafe { make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes) };
// Compress mix
debug_assert_eq!(MIX_WORDS / 4, 8);
unroll! {
for i in 0..8 {
let w = i * 4;
// Compress mix
debug_assert_eq!(MIX_WORDS / 4, 8);
unroll! {
for i in 0..8 {
let w = i * 4;
let mut reduction = mix_words[w + 0];
reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 1];
reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 2];
reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 3];
compress[i] = reduction;
}
}
}
let mut reduction = mix_words[w + 0];
reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 1];
reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 2];
reduction = reduction.wrapping_mul(FNV_PRIME) ^ mix_words[w + 3];
compress[i] = reduction;
}
}
}
let mix_hash = buf.compress_bytes;
let mix_hash = buf.compress_bytes;
let value: H256 = {
// We can interpret the buffer as an array of `u8`s, since it's `repr(C)`.
let read_ptr: *const u8 = &buf as *const MixBuf as *const u8;
// We overwrite the second half since `keccak_256` has an internal buffer and so allows
// overlapping arrays as input.
let write_ptr: *mut u8 = &mut buf.compress_bytes as *mut [u8; 32] as *mut u8;
unsafe {
keccak_256::unchecked(
write_ptr,
buf.compress_bytes.len(),
read_ptr,
buf.half_mix.bytes.len() + buf.compress_bytes.len(),
);
}
buf.compress_bytes
};
let value: H256 = {
// We can interpret the buffer as an array of `u8`s, since it's `repr(C)`.
let read_ptr: *const u8 = &buf as *const MixBuf as *const u8;
// We overwrite the second half since `keccak_256` has an internal buffer and so allows
// overlapping arrays as input.
let write_ptr: *mut u8 = &mut buf.compress_bytes as *mut [u8; 32] as *mut u8;
unsafe {
keccak_256::unchecked(
write_ptr,
buf.compress_bytes.len(),
read_ptr,
buf.half_mix.bytes.len() + buf.compress_bytes.len(),
);
}
buf.compress_bytes
};
ProofOfWork { mix_hash: mix_hash, value: value }
ProofOfWork {
mix_hash: mix_hash,
value: value,
}
}
// TODO: Use the `simd` crate
pub fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
let num_parent_nodes = cache.len();
let mut ret = cache[node_index as usize % num_parent_nodes].clone();
ret.as_words_mut()[0] ^= node_index;
let num_parent_nodes = cache.len();
let mut ret = cache[node_index as usize % num_parent_nodes].clone();
ret.as_words_mut()[0] ^= node_index;
keccak_512::inplace(ret.as_bytes_mut());
keccak_512::inplace(ret.as_bytes_mut());
debug_assert_eq!(NODE_WORDS, 16);
for i in 0..ETHASH_DATASET_PARENTS as u32 {
let parent_index = fnv_hash(node_index ^ i, ret.as_words()[i as usize % NODE_WORDS]) %
num_parent_nodes as u32;
let parent = &cache[parent_index as usize];
debug_assert_eq!(NODE_WORDS, 16);
for i in 0..ETHASH_DATASET_PARENTS as u32 {
let parent_index = fnv_hash(node_index ^ i, ret.as_words()[i as usize % NODE_WORDS])
% num_parent_nodes as u32;
let parent = &cache[parent_index as usize];
unroll! {
for w in 0..16 {
ret.as_words_mut()[w] = fnv_hash(ret.as_words()[w], parent.as_words()[w]);
}
}
}
unroll! {
for w in 0..16 {
ret.as_words_mut()[w] = fnv_hash(ret.as_words()[w], parent.as_words()[w]);
}
}
}
keccak_512::inplace(ret.as_bytes_mut());
keccak_512::inplace(ret.as_bytes_mut());
ret
ret
}
#[cfg(test)]
mod test {
use super::*;
use std::fs;
use tempdir::TempDir;
use super::*;
use std::fs;
use tempdir::TempDir;
#[test]
fn test_get_cache_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(16776896usize, get_cache_size(0));
assert_eq!(16776896usize, get_cache_size(1));
assert_eq!(16776896usize, get_cache_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1));
}
#[test]
fn test_get_cache_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(16776896usize, get_cache_size(0));
assert_eq!(16776896usize, get_cache_size(1));
assert_eq!(16776896usize, get_cache_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH));
assert_eq!(
285081536usize,
get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1)
);
}
#[test]
fn test_get_data_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(1073739904usize, get_data_size(0));
assert_eq!(1073739904usize, get_data_size(1));
assert_eq!(1073739904usize, get_data_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH));
}
#[test]
fn test_get_data_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(1073739904usize, get_data_size(0));
assert_eq!(1073739904usize, get_data_size(1));
assert_eq!(1073739904usize, get_data_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH));
}
#[test]
fn test_difficulty_test() {
let hash = [
0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let nonce = 0xd7b3ac70a301a249;
let boundary_good = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert_eq!(quick_get_difficulty(&hash, nonce, &mix_hash, false)[..], boundary_good[..]);
let boundary_bad = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert!(quick_get_difficulty(&hash, nonce, &mix_hash, false)[..] != boundary_bad[..]);
}
#[test]
fn test_difficulty_test() {
let hash = [
0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let nonce = 0xd7b3ac70a301a249;
let boundary_good = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert_eq!(
quick_get_difficulty(&hash, nonce, &mix_hash, false)[..],
boundary_good[..]
);
let boundary_bad = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert!(quick_get_difficulty(&hash, nonce, &mix_hash, false)[..] != boundary_bad[..]);
}
#[test]
fn test_light_compute() {
let hash = [
0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let boundary = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
let nonce = 0xd7b3ac70a301a249;
#[test]
fn test_light_compute() {
let hash = [
0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let boundary = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
let nonce = 0xd7b3ac70a301a249;
let tempdir = TempDir::new("").unwrap();
// difficulty = 0x085657254bd9u64;
let light = NodeCacheBuilder::new(None, u64::max_value()).light(tempdir.path(), 486382);
let result = light_compute(&light, &hash, nonce);
assert_eq!(result.mix_hash[..], mix_hash[..]);
assert_eq!(result.value[..], boundary[..]);
}
let tempdir = TempDir::new("").unwrap();
// difficulty = 0x085657254bd9u64;
let light = NodeCacheBuilder::new(None, u64::max_value()).light(tempdir.path(), 486382);
let result = light_compute(&light, &hash, nonce);
assert_eq!(result.mix_hash[..], mix_hash[..]);
assert_eq!(result.value[..], boundary[..]);
}
#[test]
fn test_drop_old_data() {
let tempdir = TempDir::new("").unwrap();
let builder = NodeCacheBuilder::new(None, u64::max_value());
let first = builder.light(tempdir.path(), 0).to_file().unwrap().to_owned();
#[test]
fn test_drop_old_data() {
let tempdir = TempDir::new("").unwrap();
let builder = NodeCacheBuilder::new(None, u64::max_value());
let first = builder
.light(tempdir.path(), 0)
.to_file()
.unwrap()
.to_owned();
let second = builder.light(tempdir.path(), ETHASH_EPOCH_LENGTH).to_file().unwrap().to_owned();
assert!(fs::metadata(&first).is_ok());
let second = builder
.light(tempdir.path(), ETHASH_EPOCH_LENGTH)
.to_file()
.unwrap()
.to_owned();
assert!(fs::metadata(&first).is_ok());
let _ = builder.light(tempdir.path(), ETHASH_EPOCH_LENGTH * 2).to_file();
assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok());
let _ = builder
.light(tempdir.path(), ETHASH_EPOCH_LENGTH * 2)
.to_file();
assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok());
let _ = builder.light(tempdir.path(), ETHASH_EPOCH_LENGTH * 3).to_file();
assert!(fs::metadata(&second).is_err());
}
let _ = builder
.light(tempdir.path(), ETHASH_EPOCH_LENGTH * 3)
.to_file();
assert!(fs::metadata(&second).is_err());
}
}


@@ -19,38 +19,48 @@ extern crate keccak_hash as hash;
pub type H256 = [u8; 32];
pub mod keccak_512 {
use super::hash;
use super::hash;
pub use self::hash::keccak_512_unchecked as unchecked;
pub use self::hash::keccak_512_unchecked as unchecked;
pub fn write(input: &[u8], output: &mut [u8]) {
hash::keccak_512(input, output);
}
pub fn write(input: &[u8], output: &mut [u8]) {
hash::keccak_512(input, output);
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe {
hash::keccak_512_unchecked(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len());
}
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe {
hash::keccak_512_unchecked(
input.as_mut_ptr(),
input.len(),
input.as_ptr(),
input.len(),
);
}
}
}
pub mod keccak_256 {
use super::hash;
use super::hash;
pub use self::hash::keccak_256_unchecked as unchecked;
pub use self::hash::keccak_256_unchecked as unchecked;
#[allow(dead_code)]
pub fn write(input: &[u8], output: &mut [u8]) {
hash::keccak_256(input, output);
}
#[allow(dead_code)]
pub fn write(input: &[u8], output: &mut [u8]) {
hash::keccak_256(input, output);
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe {
hash::keccak_256_unchecked(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len());
}
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe {
hash::keccak_256_unchecked(
input.as_mut_ptr(),
input.len(),
input.as_ptr(),
input.len(),
);
}
}
}


@@ -39,9 +39,9 @@ pub mod compute;
#[cfg(not(feature = "bench"))]
mod compute;
mod seed_compute;
mod cache;
mod keccak;
mod seed_compute;
mod shared;
#[cfg(feature = "bench")]
@@ -50,190 +50,221 @@ pub mod progpow;
mod progpow;
pub use cache::{NodeCacheBuilder, OptimizeFor};
pub use compute::{ProofOfWork, quick_get_difficulty, slow_hash_block_number};
use compute::Light;
pub use compute::{quick_get_difficulty, slow_hash_block_number, ProofOfWork};
use ethereum_types::{U256, U512};
use keccak::H256;
use parking_lot::Mutex;
pub use seed_compute::SeedHashCompute;
pub use shared::ETHASH_EPOCH_LENGTH;
use std::mem;
use std::path::{Path, PathBuf};
use std::{
mem,
path::{Path, PathBuf},
};
use std::sync::Arc;
struct LightCache {
recent_epoch: Option<u64>,
recent: Option<Arc<Light>>,
prev_epoch: Option<u64>,
prev: Option<Arc<Light>>,
recent_epoch: Option<u64>,
recent: Option<Arc<Light>>,
prev_epoch: Option<u64>,
prev: Option<Arc<Light>>,
}
/// Light/Full cache manager.
pub struct EthashManager {
nodecache_builder: NodeCacheBuilder,
cache: Mutex<LightCache>,
cache_dir: PathBuf,
progpow_transition: u64,
nodecache_builder: NodeCacheBuilder,
cache: Mutex<LightCache>,
cache_dir: PathBuf,
progpow_transition: u64,
}
impl EthashManager {
/// Create a new instance of the ethash manager
pub fn new<T: Into<Option<OptimizeFor>>>(cache_dir: &Path, optimize_for: T, progpow_transition: u64) -> EthashManager {
EthashManager {
cache_dir: cache_dir.to_path_buf(),
nodecache_builder: NodeCacheBuilder::new(optimize_for.into().unwrap_or_default(), progpow_transition),
progpow_transition: progpow_transition,
cache: Mutex::new(LightCache {
recent_epoch: None,
recent: None,
prev_epoch: None,
prev: None,
}),
}
}
/// Create a new instance of the ethash manager
pub fn new<T: Into<Option<OptimizeFor>>>(
cache_dir: &Path,
optimize_for: T,
progpow_transition: u64,
) -> EthashManager {
EthashManager {
cache_dir: cache_dir.to_path_buf(),
nodecache_builder: NodeCacheBuilder::new(
optimize_for.into().unwrap_or_default(),
progpow_transition,
),
progpow_transition: progpow_transition,
cache: Mutex::new(LightCache {
recent_epoch: None,
recent: None,
prev_epoch: None,
prev: None,
}),
}
}
/// Calculate the light client data
/// `block_number` - Block number to check
/// `header_hash` - The header hash to pack into the mix
/// `nonce` - The nonce to pack into the mix
pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork {
let epoch = block_number / ETHASH_EPOCH_LENGTH;
let light = {
let mut lights = self.cache.lock();
let light = if block_number == self.progpow_transition {
// we need to regenerate the cache to trigger algorithm change to progpow inside `Light`
None
} else {
match lights.recent_epoch.clone() {
Some(ref e) if *e == epoch => lights.recent.clone(),
_ => match lights.prev_epoch.clone() {
Some(e) if e == epoch => {
// don't swap if recent is newer.
if lights.recent_epoch > lights.prev_epoch {
None
} else {
// swap
let t = lights.prev_epoch;
lights.prev_epoch = lights.recent_epoch;
lights.recent_epoch = t;
let t = lights.prev.clone();
lights.prev = lights.recent.clone();
lights.recent = t;
lights.recent.clone()
}
}
_ => None,
},
}
};
/// Calculate the light client data
/// `block_number` - Block number to check
/// `header_hash` - The header hash to pack into the mix
/// `nonce` - The nonce to pack into the mix
pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork {
let epoch = block_number / ETHASH_EPOCH_LENGTH;
let light = {
let mut lights = self.cache.lock();
let light = if block_number == self.progpow_transition {
// we need to regenerate the cache to trigger algorithm change to progpow inside `Light`
None
} else {
match lights.recent_epoch.clone() {
Some(ref e) if *e == epoch => lights.recent.clone(),
_ => match lights.prev_epoch.clone() {
Some(e) if e == epoch => {
// don't swap if recent is newer.
if lights.recent_epoch > lights.prev_epoch {
None
} else {
// swap
let t = lights.prev_epoch;
lights.prev_epoch = lights.recent_epoch;
lights.recent_epoch = t;
let t = lights.prev.clone();
lights.prev = lights.recent.clone();
lights.recent = t;
lights.recent.clone()
}
}
_ => None,
},
}
};
match light {
None => {
let light = match self.nodecache_builder.light_from_file(
&self.cache_dir,
block_number,
) {
Ok(light) => Arc::new(light),
Err(e) => {
debug!("Light cache file not found for {}:{}", block_number, e);
let mut light = self.nodecache_builder.light(
&self.cache_dir,
block_number,
);
if let Err(e) = light.to_file() {
warn!("Light cache file write error: {}", e);
}
Arc::new(light)
}
};
lights.prev_epoch = mem::replace(&mut lights.recent_epoch, Some(epoch));
lights.prev = mem::replace(&mut lights.recent, Some(light.clone()));
light
}
Some(light) => light,
}
};
light.compute(header_hash, nonce, block_number)
}
match light {
None => {
let light = match self
.nodecache_builder
.light_from_file(&self.cache_dir, block_number)
{
Ok(light) => Arc::new(light),
Err(e) => {
debug!("Light cache file not found for {}:{}", block_number, e);
let mut light =
self.nodecache_builder.light(&self.cache_dir, block_number);
if let Err(e) = light.to_file() {
warn!("Light cache file write error: {}", e);
}
Arc::new(light)
}
};
lights.prev_epoch = mem::replace(&mut lights.recent_epoch, Some(epoch));
lights.prev = mem::replace(&mut lights.recent, Some(light.clone()));
light
}
Some(light) => light,
}
};
light.compute(header_hash, nonce, block_number)
}
}
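// A minimal usage sketch for `EthashManager`, mirroring the `test_lru` test
// further down (the cache directory path here is just an example):
//
//     let manager = EthashManager::new(Path::new("/tmp/ethash"), None, u64::max_value());
//     let header_hash = [0u8; 32];
//     let pow = manager.compute_light(1, &header_hash, 1);
//     // `pow.value` is checked against the difficulty boundary and
//     // `pow.mix_hash` against the header's mix digest.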
/// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`.
pub fn boundary_to_difficulty(boundary: &ethereum_types::H256) -> U256 {
difficulty_to_boundary_aux(&**boundary)
difficulty_to_boundary_aux(&**boundary)
}
/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`.
pub fn difficulty_to_boundary(difficulty: &U256) -> ethereum_types::H256 {
difficulty_to_boundary_aux(difficulty).into()
difficulty_to_boundary_aux(difficulty).into()
}
fn difficulty_to_boundary_aux<T: Into<U512>>(difficulty: T) -> ethereum_types::U256 {
let difficulty = difficulty.into();
let difficulty = difficulty.into();
assert!(!difficulty.is_zero());
assert!(!difficulty.is_zero());
if difficulty == U512::one() {
U256::max_value()
} else {
// difficulty > 1, so result should never overflow 256 bits
U256::from((U512::one() << 256) / difficulty)
}
if difficulty == U512::one() {
U256::max_value()
} else {
// difficulty > 1, so result should never overflow 256 bits
U256::from((U512::one() << 256) / difficulty)
}
}
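// Worked example (matching `test_difficulty_to_boundary` below): a difficulty
// of 2 maps to the boundary 2^256 / 2 = 2^255, i.e. 0x8000...00, and feeding
// that boundary back through `boundary_to_difficulty` yields 2 again:
//
//     assert_eq!(
//         difficulty_to_boundary(&U256::from(2)),
//         H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap()
//     );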
#[test]
fn test_lru() {
use tempdir::TempDir;
use tempdir::TempDir;
let tempdir = TempDir::new("").unwrap();
let ethash = EthashManager::new(tempdir.path(), None, u64::max_value());
let hash = [0u8; 32];
ethash.compute_light(1, &hash, 1);
ethash.compute_light(50000, &hash, 1);
assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 1);
assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0);
ethash.compute_light(1, &hash, 1);
assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 0);
assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 1);
ethash.compute_light(70000, &hash, 1);
assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 2);
assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0);
let tempdir = TempDir::new("").unwrap();
let ethash = EthashManager::new(tempdir.path(), None, u64::max_value());
let hash = [0u8; 32];
ethash.compute_light(1, &hash, 1);
ethash.compute_light(50000, &hash, 1);
assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 1);
assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0);
ethash.compute_light(1, &hash, 1);
assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 0);
assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 1);
ethash.compute_light(70000, &hash, 1);
assert_eq!(ethash.cache.lock().recent_epoch.unwrap(), 2);
assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0);
}
#[test]
fn test_difficulty_to_boundary() {
use ethereum_types::H256;
use std::str::FromStr;
use ethereum_types::H256;
use std::str::FromStr;
assert_eq!(difficulty_to_boundary(&U256::from(1)), H256::from(U256::max_value()));
assert_eq!(difficulty_to_boundary(&U256::from(2)), H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(difficulty_to_boundary(&U256::from(4)), H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(difficulty_to_boundary(&U256::from(32)), H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(
difficulty_to_boundary(&U256::from(1)),
H256::from(U256::max_value())
);
assert_eq!(
difficulty_to_boundary(&U256::from(2)),
H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap()
);
assert_eq!(
difficulty_to_boundary(&U256::from(4)),
H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap()
);
assert_eq!(
difficulty_to_boundary(&U256::from(32)),
H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap()
);
}
#[test]
fn test_difficulty_to_boundary_regression() {
use ethereum_types::H256;
use ethereum_types::H256;
// the last bit was originally being truncated when performing the conversion
// https://github.com/paritytech/parity-ethereum/issues/8397
for difficulty in 1..9 {
assert_eq!(U256::from(difficulty), boundary_to_difficulty(&difficulty_to_boundary(&difficulty.into())));
assert_eq!(H256::from(difficulty), difficulty_to_boundary(&boundary_to_difficulty(&difficulty.into())));
assert_eq!(U256::from(difficulty), boundary_to_difficulty(&boundary_to_difficulty(&difficulty.into()).into()));
assert_eq!(H256::from(difficulty), difficulty_to_boundary(&difficulty_to_boundary(&difficulty.into()).into()));
}
// the last bit was originally being truncated when performing the conversion
// https://github.com/paritytech/parity-ethereum/issues/8397
for difficulty in 1..9 {
assert_eq!(
U256::from(difficulty),
boundary_to_difficulty(&difficulty_to_boundary(&difficulty.into()))
);
assert_eq!(
H256::from(difficulty),
difficulty_to_boundary(&boundary_to_difficulty(&difficulty.into()))
);
assert_eq!(
U256::from(difficulty),
boundary_to_difficulty(&boundary_to_difficulty(&difficulty.into()).into())
);
assert_eq!(
H256::from(difficulty),
difficulty_to_boundary(&difficulty_to_boundary(&difficulty.into()).into())
);
}
}
#[test]
#[should_panic]
fn test_difficulty_to_boundary_panics_on_zero() {
difficulty_to_boundary(&U256::from(0));
difficulty_to_boundary(&U256::from(0));
}
#[test]
#[should_panic]
fn test_boundary_to_difficulty_panics_on_zero() {
boundary_to_difficulty(&ethereum_types::H256::from(0));
boundary_to_difficulty(&ethereum_types::H256::from(0));
}


@@ -14,9 +14,9 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use compute::{FNV_PRIME, calculate_dag_item};
use compute::{calculate_dag_item, FNV_PRIME};
use keccak::H256;
use shared::{ETHASH_ACCESSES, ETHASH_MIX_BYTES, Node, get_data_size};
use shared::{get_data_size, Node, ETHASH_ACCESSES, ETHASH_MIX_BYTES};
const PROGPOW_CACHE_BYTES: usize = 16 * 1024;
const PROGPOW_CACHE_WORDS: usize = PROGPOW_CACHE_BYTES / 4;
@@ -32,564 +32,551 @@ const PROGPOW_REGS: usize = 32;
const FNV_HASH: u32 = 0x811c9dc5;
const KECCAKF_RNDC: [u32; 24] = [
0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001,
0x80008081, 0x00008009, 0x0000008a, 0x00000088, 0x80008009, 0x8000000a,
0x8000808b, 0x0000008b, 0x00008089, 0x00008003, 0x00008002, 0x00000080,
0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008
0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001, 0x80008081, 0x00008009,
0x0000008a, 0x00000088, 0x80008009, 0x8000000a, 0x8000808b, 0x0000008b, 0x00008089, 0x00008003,
0x00008002, 0x00000080, 0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008,
];
const KECCAKF_ROTC: [u32; 24] = [
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44,
];
const KECCAKF_PILN: [usize; 24] = [
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1,
];
fn keccak_f800_round(st: &mut [u32; 25], r: usize) {
// Theta
let mut bc = [0u32; 5];
for i in 0..bc.len() {
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
}
// Theta
let mut bc = [0u32; 5];
for i in 0..bc.len() {
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
}
for i in 0..bc.len() {
let t = bc[(i + 4) % 5] ^ bc[(i + 1) % 5].rotate_left(1);
for j in (0..st.len()).step_by(5) {
st[j + i] ^= t;
}
}
for i in 0..bc.len() {
let t = bc[(i + 4) % 5] ^ bc[(i + 1) % 5].rotate_left(1);
for j in (0..st.len()).step_by(5) {
st[j + i] ^= t;
}
}
// Rho Pi
let mut t = st[1];
// Rho Pi
let mut t = st[1];
debug_assert_eq!(KECCAKF_ROTC.len(), 24);
for i in 0..24 {
let j = KECCAKF_PILN[i];
bc[0] = st[j];
st[j] = t.rotate_left(KECCAKF_ROTC[i]);
t = bc[0];
}
debug_assert_eq!(KECCAKF_ROTC.len(), 24);
for i in 0..24 {
let j = KECCAKF_PILN[i];
bc[0] = st[j];
st[j] = t.rotate_left(KECCAKF_ROTC[i]);
t = bc[0];
}
// Chi
for j in (0..st.len()).step_by(5) {
for i in 0..bc.len() {
bc[i] = st[j + i];
}
for i in 0..bc.len() {
st[j + i] ^= (!bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
}
// Chi
for j in (0..st.len()).step_by(5) {
for i in 0..bc.len() {
bc[i] = st[j + i];
}
for i in 0..bc.len() {
st[j + i] ^= (!bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
}
// Iota
debug_assert!(r < KECCAKF_RNDC.len());
st[0] ^= KECCAKF_RNDC[r];
// Iota
debug_assert!(r < KECCAKF_RNDC.len());
st[0] ^= KECCAKF_RNDC[r];
}
fn keccak_f800(header_hash: H256, nonce: u64, result: [u32; 8], st: &mut [u32; 25]) {
for i in 0..8 {
st[i] = (header_hash[4 * i] as u32) +
((header_hash[4 * i + 1] as u32) << 8) +
((header_hash[4 * i + 2] as u32) << 16) +
((header_hash[4 * i + 3] as u32) << 24);
}
for i in 0..8 {
st[i] = (header_hash[4 * i] as u32)
+ ((header_hash[4 * i + 1] as u32) << 8)
+ ((header_hash[4 * i + 2] as u32) << 16)
+ ((header_hash[4 * i + 3] as u32) << 24);
}
st[8] = nonce as u32;
st[9] = (nonce >> 32) as u32;
st[8] = nonce as u32;
st[9] = (nonce >> 32) as u32;
for i in 0..8 {
st[10 + i] = result[i];
}
for i in 0..8 {
st[10 + i] = result[i];
}
for r in 0..22 {
keccak_f800_round(st, r);
}
for r in 0..22 {
keccak_f800_round(st, r);
}
}
pub fn keccak_f800_short(header_hash: H256, nonce: u64, result: [u32; 8]) -> u64 {
let mut st = [0u32; 25];
keccak_f800(header_hash, nonce, result, &mut st);
(st[0].swap_bytes() as u64) << 32 | st[1].swap_bytes() as u64
let mut st = [0u32; 25];
keccak_f800(header_hash, nonce, result, &mut st);
(st[0].swap_bytes() as u64) << 32 | st[1].swap_bytes() as u64
}
pub fn keccak_f800_long(header_hash: H256, nonce: u64, result: [u32; 8]) -> H256 {
let mut st = [0u32; 25];
keccak_f800(header_hash, nonce, result, &mut st);
let mut st = [0u32; 25];
keccak_f800(header_hash, nonce, result, &mut st);
// NOTE: transmute from `[u32; 8]` to `[u8; 32]`
unsafe {
std::mem::transmute(
[st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7]]
)
}
// NOTE: transmute from `[u32; 8]` to `[u8; 32]`
unsafe { std::mem::transmute([st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7]]) }
}
#[inline]
fn fnv1a_hash(h: u32, d: u32) -> u32 {
(h ^ d).wrapping_mul(FNV_PRIME)
(h ^ d).wrapping_mul(FNV_PRIME)
}
#[derive(Clone)]
struct Kiss99 {
z: u32,
w: u32,
jsr: u32,
jcong: u32,
z: u32,
w: u32,
jsr: u32,
jcong: u32,
}
impl Kiss99 {
fn new(z: u32, w: u32, jsr: u32, jcong: u32) -> Kiss99 {
Kiss99 { z, w, jsr, jcong }
}
fn new(z: u32, w: u32, jsr: u32, jcong: u32) -> Kiss99 {
Kiss99 { z, w, jsr, jcong }
}
#[inline]
fn next_u32(&mut self) -> u32 {
self.z = 36969u32.wrapping_mul(self.z & 65535).wrapping_add(self.z >> 16);
self.w = 18000u32.wrapping_mul(self.w & 65535).wrapping_add(self.w >> 16);
let mwc = (self.z << 16).wrapping_add(self.w);
self.jsr ^= self.jsr << 17;
self.jsr ^= self.jsr >> 13;
self.jsr ^= self.jsr << 5;
self.jcong = 69069u32.wrapping_mul(self.jcong).wrapping_add(1234567);
#[inline]
fn next_u32(&mut self) -> u32 {
self.z = 36969u32
.wrapping_mul(self.z & 65535)
.wrapping_add(self.z >> 16);
self.w = 18000u32
.wrapping_mul(self.w & 65535)
.wrapping_add(self.w >> 16);
let mwc = (self.z << 16).wrapping_add(self.w);
self.jsr ^= self.jsr << 17;
self.jsr ^= self.jsr >> 13;
self.jsr ^= self.jsr << 5;
self.jcong = 69069u32.wrapping_mul(self.jcong).wrapping_add(1234567);
(mwc ^ self.jcong).wrapping_add(self.jsr)
}
(mwc ^ self.jcong).wrapping_add(self.jsr)
}
}
fn fill_mix(seed: u64, lane_id: u32) -> [u32; PROGPOW_REGS] {
// Use FNV to expand the per-warp seed to per-lane
// Use KISS to expand the per-lane seed to fill mix
let z = fnv1a_hash(FNV_HASH, seed as u32);
let w = fnv1a_hash(z, (seed >> 32) as u32);
let jsr = fnv1a_hash(w, lane_id);
let jcong = fnv1a_hash(jsr, lane_id);
// Use FNV to expand the per-warp seed to per-lane
// Use KISS to expand the per-lane seed to fill mix
let z = fnv1a_hash(FNV_HASH, seed as u32);
let w = fnv1a_hash(z, (seed >> 32) as u32);
let jsr = fnv1a_hash(w, lane_id);
let jcong = fnv1a_hash(jsr, lane_id);
let mut rnd = Kiss99::new(z, w, jsr, jcong);
let mut rnd = Kiss99::new(z, w, jsr, jcong);
let mut mix = [0; PROGPOW_REGS];
let mut mix = [0; PROGPOW_REGS];
debug_assert_eq!(PROGPOW_REGS, 32);
for i in 0..32 {
mix[i] = rnd.next_u32();
}
debug_assert_eq!(PROGPOW_REGS, 32);
for i in 0..32 {
mix[i] = rnd.next_u32();
}
mix
mix
}
// Merge new data from b into the value in a. Assuming a has high entropy, only
// do ops that retain entropy even if b is low entropy (i.e. don't do a & b).
fn merge(a: u32, b: u32, r: u32) -> u32 {
match r % 4 {
0 => a.wrapping_mul(33).wrapping_add(b),
1 => (a ^ b).wrapping_mul(33),
2 => a.rotate_left(((r >> 16) % 31) + 1) ^ b,
_ => a.rotate_right(((r >> 16) % 31) + 1) ^ b,
}
match r % 4 {
0 => a.wrapping_mul(33).wrapping_add(b),
1 => (a ^ b).wrapping_mul(33),
2 => a.rotate_left(((r >> 16) % 31) + 1) ^ b,
_ => a.rotate_right(((r >> 16) % 31) + 1) ^ b,
}
}
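// Why the comment above rules out `a & b`: AND can only clear bits, so a
// low-entropy `b` (e.g. zero) wipes out whatever entropy `a` carried, while
// the multiply/XOR/rotate variants above keep `a`'s bits in play:
//
//     let a = 0xdead_beef_u32;
//     assert_eq!(a & 0, 0);                                      // entropy of `a` lost
//     assert_eq!((a ^ 0).wrapping_mul(33), a.wrapping_mul(33));  // entropy of `a` retained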
fn math(a: u32, b: u32, r: u32) -> u32 {
match r % 11 {
0 => a.wrapping_add(b),
1 => a.wrapping_mul(b),
2 => ((a as u64).wrapping_mul(b as u64) >> 32) as u32,
3 => a.min(b),
4 => a.rotate_left(b),
5 => a.rotate_right(b),
6 => a & b,
7 => a | b,
8 => a ^ b,
9 => a.leading_zeros() + b.leading_zeros(),
_ => a.count_ones() + b.count_ones(),
}
match r % 11 {
0 => a.wrapping_add(b),
1 => a.wrapping_mul(b),
2 => ((a as u64).wrapping_mul(b as u64) >> 32) as u32,
3 => a.min(b),
4 => a.rotate_left(b),
5 => a.rotate_right(b),
6 => a & b,
7 => a | b,
8 => a ^ b,
9 => a.leading_zeros() + b.leading_zeros(),
_ => a.count_ones() + b.count_ones(),
}
}
fn progpow_init(seed: u64) -> (Kiss99, [u32; PROGPOW_REGS], [u32; PROGPOW_REGS]) {
let z = fnv1a_hash(FNV_HASH, seed as u32);
let w = fnv1a_hash(z, (seed >> 32) as u32);
let jsr = fnv1a_hash(w, seed as u32);
let jcong = fnv1a_hash(jsr, (seed >> 32) as u32);
let z = fnv1a_hash(FNV_HASH, seed as u32);
let w = fnv1a_hash(z, (seed >> 32) as u32);
let jsr = fnv1a_hash(w, seed as u32);
let jcong = fnv1a_hash(jsr, (seed >> 32) as u32);
let mut rnd = Kiss99::new(z, w, jsr, jcong);
let mut rnd = Kiss99::new(z, w, jsr, jcong);
// Create a random sequence of mix destinations for merge() and mix sources
// for cache reads. This guarantees every destination is merged exactly once
// and that there are no duplicate cache reads, which could be optimized away.
// Uses a Fisher-Yates shuffle.
let mut mix_seq_dst = [0u32; PROGPOW_REGS];
let mut mix_seq_cache = [0u32; PROGPOW_REGS];
for i in 0..mix_seq_dst.len() {
mix_seq_dst[i] = i as u32;
mix_seq_cache[i] = i as u32;
}
// Create a random sequence of mix destinations for merge() and mix sources
// for cache reads. This guarantees every destination is merged exactly once
// and that there are no duplicate cache reads, which could be optimized away.
// Uses a Fisher-Yates shuffle.
let mut mix_seq_dst = [0u32; PROGPOW_REGS];
let mut mix_seq_cache = [0u32; PROGPOW_REGS];
for i in 0..mix_seq_dst.len() {
mix_seq_dst[i] = i as u32;
mix_seq_cache[i] = i as u32;
}
for i in (1..mix_seq_dst.len()).rev() {
let j = rnd.next_u32() as usize % (i + 1);
mix_seq_dst.swap(i, j);
for i in (1..mix_seq_dst.len()).rev() {
let j = rnd.next_u32() as usize % (i + 1);
mix_seq_dst.swap(i, j);
let j = rnd.next_u32() as usize % (i + 1);
mix_seq_cache.swap(i, j);
}
let j = rnd.next_u32() as usize % (i + 1);
mix_seq_cache.swap(i, j);
}
(rnd, mix_seq_dst, mix_seq_cache)
(rnd, mix_seq_dst, mix_seq_cache)
}
pub type CDag = [u32; PROGPOW_CACHE_WORDS];
fn progpow_loop(
seed: u64,
loop_: usize,
mix: &mut [[u32; PROGPOW_REGS]; PROGPOW_LANES],
cache: &[Node],
c_dag: &CDag,
data_size: usize,
seed: u64,
loop_: usize,
mix: &mut [[u32; PROGPOW_REGS]; PROGPOW_LANES],
cache: &[Node],
c_dag: &CDag,
data_size: usize,
) {
// All lanes share a base address for the global load. Global offset uses
// mix[0] to guarantee it depends on the load result.
let g_offset = mix[loop_ % PROGPOW_LANES][0] as usize %
(64 * data_size / (PROGPOW_LANES * PROGPOW_DAG_LOADS));
// All lanes share a base address for the global load. Global offset uses
// mix[0] to guarantee it depends on the load result.
let g_offset = mix[loop_ % PROGPOW_LANES][0] as usize
% (64 * data_size / (PROGPOW_LANES * PROGPOW_DAG_LOADS));
// 256 bytes of dag data
let mut dag_item = [0u32; 64];
// 256 bytes of dag data
let mut dag_item = [0u32; 64];
// Fetch DAG nodes (64 bytes each)
for l in 0..PROGPOW_DAG_LOADS {
let index = g_offset * PROGPOW_LANES * PROGPOW_DAG_LOADS + l * 16;
let node = calculate_dag_item(index as u32 / 16, cache);
dag_item[l * 16..(l + 1) * 16].clone_from_slice(node.as_words());
}
// Fetch DAG nodes (64 bytes each)
for l in 0..PROGPOW_DAG_LOADS {
let index = g_offset * PROGPOW_LANES * PROGPOW_DAG_LOADS + l * 16;
let node = calculate_dag_item(index as u32 / 16, cache);
dag_item[l * 16..(l + 1) * 16].clone_from_slice(node.as_words());
}
let (rnd, mix_seq_dst, mix_seq_cache) = progpow_init(seed);
let (rnd, mix_seq_dst, mix_seq_cache) = progpow_init(seed);
// Lanes can execute in parallel and will be convergent
for l in 0..mix.len() {
let mut rnd = rnd.clone();
// Lanes can execute in parallel and will be convergent
for l in 0..mix.len() {
let mut rnd = rnd.clone();
// Initialize the seed and mix destination sequence
let mut mix_seq_dst_cnt = 0;
let mut mix_seq_cache_cnt = 0;
// Initialize the seed and mix destination sequence
let mut mix_seq_dst_cnt = 0;
let mut mix_seq_cache_cnt = 0;
let mut mix_dst = || {
let res = mix_seq_dst[mix_seq_dst_cnt % PROGPOW_REGS] as usize;
mix_seq_dst_cnt += 1;
res
};
let mut mix_cache = || {
let res = mix_seq_cache[mix_seq_cache_cnt % PROGPOW_REGS] as usize;
mix_seq_cache_cnt += 1;
res
};
let mut mix_dst = || {
let res = mix_seq_dst[mix_seq_dst_cnt % PROGPOW_REGS] as usize;
mix_seq_dst_cnt += 1;
res
};
let mut mix_cache = || {
let res = mix_seq_cache[mix_seq_cache_cnt % PROGPOW_REGS] as usize;
mix_seq_cache_cnt += 1;
res
};
for i in 0..PROGPOW_CNT_CACHE.max(PROGPOW_CNT_MATH) {
if i < PROGPOW_CNT_CACHE {
// Cached memory access, lanes access random 32-bit locations
// within the first portion of the DAG
let offset = mix[l][mix_cache()] as usize % PROGPOW_CACHE_WORDS;
let data = c_dag[offset];
let dst = mix_dst();
for i in 0..PROGPOW_CNT_CACHE.max(PROGPOW_CNT_MATH) {
if i < PROGPOW_CNT_CACHE {
// Cached memory access, lanes access random 32-bit locations
// within the first portion of the DAG
let offset = mix[l][mix_cache()] as usize % PROGPOW_CACHE_WORDS;
let data = c_dag[offset];
let dst = mix_dst();
mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32());
}
mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32());
}
if i < PROGPOW_CNT_MATH {
// Random math
// Generate 2 unique sources
let src_rnd = rnd.next_u32() % (PROGPOW_REGS * (PROGPOW_REGS - 1)) as u32;
let src1 = src_rnd % PROGPOW_REGS as u32; // 0 <= src1 < PROGPOW_REGS
let mut src2 = src_rnd / PROGPOW_REGS as u32; // 0 <= src2 < PROGPOW_REGS - 1
if src2 >= src1 {
src2 += 1; // src2 is now any reg other than src1
}
if i < PROGPOW_CNT_MATH {
// Random math
// Generate 2 unique sources
let src_rnd = rnd.next_u32() % (PROGPOW_REGS * (PROGPOW_REGS - 1)) as u32;
let src1 = src_rnd % PROGPOW_REGS as u32; // 0 <= src1 < PROGPOW_REGS
let mut src2 = src_rnd / PROGPOW_REGS as u32; // 0 <= src2 < PROGPOW_REGS - 1
if src2 >= src1 {
src2 += 1; // src2 is now any reg other than src1
}
let data = math(mix[l][src1 as usize], mix[l][src2 as usize], rnd.next_u32());
let dst = mix_dst();
let data = math(mix[l][src1 as usize], mix[l][src2 as usize], rnd.next_u32());
let dst = mix_dst();
mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32());
}
}
mix[l][dst] = merge(mix[l][dst], data, rnd.next_u32());
}
}
// Global load to sequential locations
let mut data_g = [0u32; PROGPOW_DAG_LOADS];
let index = ((l ^ loop_) % PROGPOW_LANES) * PROGPOW_DAG_LOADS;
for i in 0..PROGPOW_DAG_LOADS {
data_g[i] = dag_item[index + i];
}
// Global load to sequential locations
let mut data_g = [0u32; PROGPOW_DAG_LOADS];
let index = ((l ^ loop_) % PROGPOW_LANES) * PROGPOW_DAG_LOADS;
for i in 0..PROGPOW_DAG_LOADS {
data_g[i] = dag_item[index + i];
}
// Consume the global load data at the very end of the loop to allow
// full latency hiding. Always merge into `mix[0]` to feed the offset
// calculation.
mix[l][0] = merge(mix[l][0], data_g[0], rnd.next_u32());
for i in 1..PROGPOW_DAG_LOADS {
let dst = mix_dst();
mix[l][dst] = merge(mix[l][dst], data_g[i], rnd.next_u32());
}
}
// Consume the global load data at the very end of the loop to allow
// full latency hiding. Always merge into `mix[0]` to feed the offset
// calculation.
mix[l][0] = merge(mix[l][0], data_g[0], rnd.next_u32());
for i in 1..PROGPOW_DAG_LOADS {
let dst = mix_dst();
mix[l][dst] = merge(mix[l][dst], data_g[i], rnd.next_u32());
}
}
}
pub fn progpow(
header_hash: H256,
nonce: u64,
block_number: u64,
cache: &[Node],
c_dag: &CDag,
header_hash: H256,
nonce: u64,
block_number: u64,
cache: &[Node],
c_dag: &CDag,
) -> (H256, H256) {
let mut mix = [[0u32; PROGPOW_REGS]; PROGPOW_LANES];
let mut lane_results = [0u32; PROGPOW_LANES];
let mut result = [0u32; 8];
let mut mix = [[0u32; PROGPOW_REGS]; PROGPOW_LANES];
let mut lane_results = [0u32; PROGPOW_LANES];
let mut result = [0u32; 8];
let data_size = get_data_size(block_number) / PROGPOW_MIX_BYTES;
let data_size = get_data_size(block_number) / PROGPOW_MIX_BYTES;
// NOTE: This assert is required to help the optimizer elide the non-zero
// remainder check in `progpow_loop`.
assert!(data_size > 0);
// NOTE: This assert is required to help the optimizer elide the non-zero
// remainder check in `progpow_loop`.
assert!(data_size > 0);
// Initialize mix for all lanes
let seed = keccak_f800_short(header_hash, nonce, result);
// Initialize mix for all lanes
let seed = keccak_f800_short(header_hash, nonce, result);
for l in 0..mix.len() {
mix[l] = fill_mix(seed, l as u32);
}
for l in 0..mix.len() {
mix[l] = fill_mix(seed, l as u32);
}
// Execute the randomly generated inner loop
let period = block_number / PROGPOW_PERIOD_LENGTH as u64;
for i in 0..PROGPOW_CNT_DAG {
progpow_loop(
period,
i,
&mut mix,
cache,
c_dag,
data_size,
);
}
// Execute the randomly generated inner loop
let period = block_number / PROGPOW_PERIOD_LENGTH as u64;
for i in 0..PROGPOW_CNT_DAG {
progpow_loop(period, i, &mut mix, cache, c_dag, data_size);
}
// Reduce mix data to a single per-lane result
for l in 0..lane_results.len() {
lane_results[l] = FNV_HASH;
for i in 0..PROGPOW_REGS {
lane_results[l] = fnv1a_hash(lane_results[l], mix[l][i]);
}
}
// Reduce mix data to a single per-lane result
for l in 0..lane_results.len() {
lane_results[l] = FNV_HASH;
for i in 0..PROGPOW_REGS {
lane_results[l] = fnv1a_hash(lane_results[l], mix[l][i]);
}
}
// Reduce all lanes to a single 256-bit result
result = [FNV_HASH; 8];
for l in 0..PROGPOW_LANES {
result[l % 8] = fnv1a_hash(result[l % 8], lane_results[l]);
}
// Reduce all lanes to a single 256-bit result
result = [FNV_HASH; 8];
for l in 0..PROGPOW_LANES {
result[l % 8] = fnv1a_hash(result[l % 8], lane_results[l]);
}
let digest = keccak_f800_long(header_hash, seed, result);
let digest = keccak_f800_long(header_hash, seed, result);
// NOTE: transmute from `[u32; 8]` to `[u8; 32]`
let result = unsafe { ::std::mem::transmute(result) };
// NOTE: transmute from `[u32; 8]` to `[u8; 32]`
let result = unsafe { ::std::mem::transmute(result) };
(digest, result)
(digest, result)
}
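// Illustrative sketch (not part of the diff): both reduction steps above are
// plain 32-bit FNV-1a folds. The constants below are the usual FNV-1a prime and
// offset basis; the crate's own `FNV_HASH` seed may differ from the offset basis.
const FNV_PRIME_32: u32 = 0x0100_0193;
const FNV_OFFSET_32: u32 = 0x811c_9dc5;

fn fnv1a_32(hash: u32, data: u32) -> u32 {
    (hash ^ data).wrapping_mul(FNV_PRIME_32)
}

// Fold each lane's registers into one word, then fold the lane words into an
// eight-word (256-bit) accumulator, mirroring the loops in `progpow` above.
fn reduce_lanes(mix: &[[u32; 32]; 16]) -> [u32; 8] {
    let mut result = [FNV_OFFSET_32; 8];
    for (l, regs) in mix.iter().enumerate() {
        let lane = regs.iter().fold(FNV_OFFSET_32, |h, &r| fnv1a_32(h, r));
        result[l % 8] = fnv1a_32(result[l % 8], lane);
    }
    result
}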
pub fn generate_cdag(cache: &[Node]) -> CDag {
let mut c_dag = [0u32; PROGPOW_CACHE_WORDS];
let mut c_dag = [0u32; PROGPOW_CACHE_WORDS];
for i in 0..PROGPOW_CACHE_WORDS / 16 {
let node = calculate_dag_item(i as u32, cache);
for j in 0..16 {
c_dag[i * 16 + j] = node.as_words()[j];
}
}
for i in 0..PROGPOW_CACHE_WORDS / 16 {
let node = calculate_dag_item(i as u32, cache);
for j in 0..16 {
c_dag[i * 16 + j] = node.as_words()[j];
}
}
c_dag
c_dag
}
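// Illustrative sketch (not part of the diff): `generate_cdag` simply flattens
// the first `PROGPOW_CACHE_WORDS / 16` DAG nodes into one word array, so a
// cached access `c_dag[offset]` reads word `offset % 16` of node `offset / 16`.
// The `node_words` closure is a hypothetical stand-in for
// `calculate_dag_item(i, cache).as_words()`.
fn flatten_nodes(node_words: impl Fn(u32) -> [u32; 16], cache_words: usize) -> Vec<u32> {
    let mut c_dag = vec![0u32; cache_words];
    for i in 0..cache_words / 16 {
        c_dag[i * 16..(i + 1) * 16].copy_from_slice(&node_words(i as u32));
    }
    c_dag
}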
#[cfg(test)]
mod test {
use tempdir::TempDir;
use tempdir::TempDir;
use cache::{NodeCacheBuilder, OptimizeFor};
use keccak::H256;
use rustc_hex::FromHex;
use serde_json::{self, Value};
use std::collections::VecDeque;
use super::*;
use super::*;
use cache::{NodeCacheBuilder, OptimizeFor};
use keccak::H256;
use rustc_hex::FromHex;
use serde_json::{self, Value};
use std::collections::VecDeque;
fn h256(hex: &str) -> H256 {
let bytes = FromHex::from_hex(hex).unwrap();
let mut res = [0; 32];
res.copy_from_slice(&bytes);
res
}
fn h256(hex: &str) -> H256 {
let bytes = FromHex::from_hex(hex).unwrap();
let mut res = [0; 32];
res.copy_from_slice(&bytes);
res
}
#[test]
fn test_cdag() {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
#[test]
fn test_cdag() {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
let c_dag = generate_cdag(cache.as_ref());
let c_dag = generate_cdag(cache.as_ref());
let expected = vec![
690150178u32, 1181503948, 2248155602, 2118233073, 2193871115,
1791778428, 1067701239, 724807309, 530799275, 3480325829, 3899029234,
1998124059, 2541974622, 1100859971, 1297211151, 3268320000, 2217813733,
2690422980, 3172863319, 2651064309
];
let expected = vec![
690150178u32,
1181503948,
2248155602,
2118233073,
2193871115,
1791778428,
1067701239,
724807309,
530799275,
3480325829,
3899029234,
1998124059,
2541974622,
1100859971,
1297211151,
3268320000,
2217813733,
2690422980,
3172863319,
2651064309,
];
assert_eq!(
c_dag.iter().take(20).cloned().collect::<Vec<_>>(),
expected,
);
}
assert_eq!(c_dag.iter().take(20).cloned().collect::<Vec<_>>(), expected,);
}
#[test]
fn test_random_merge() {
let tests = [
(1000000u32, 101u32, 33000101u32),
(2000000, 102, 66003366),
(3000000, 103, 6000103),
(4000000, 104, 2000104),
(1000000, 0, 33000000),
(2000000, 0, 66000000),
(3000000, 0, 6000000),
(4000000, 0, 2000000),
];
#[test]
fn test_random_merge() {
let tests = [
(1000000u32, 101u32, 33000101u32),
(2000000, 102, 66003366),
(3000000, 103, 6000103),
(4000000, 104, 2000104),
(1000000, 0, 33000000),
(2000000, 0, 66000000),
(3000000, 0, 6000000),
(4000000, 0, 2000000),
];
for (i, &(a, b, expected)) in tests.iter().enumerate() {
assert_eq!(
merge(a, b, i as u32),
expected,
);
}
}
for (i, &(a, b, expected)) in tests.iter().enumerate() {
assert_eq!(merge(a, b, i as u32), expected,);
}
}
#[test]
fn test_random_math() {
let tests = [
(20u32, 22u32, 42u32),
(70000, 80000, 1305032704),
(70000, 80000, 1),
(1, 2, 1),
(3, 10000, 196608),
(3, 0, 3),
(3, 6, 2),
(3, 6, 7),
(3, 6, 5),
(0, 0xffffffff, 32),
(3 << 13, 1 << 5, 3),
(22, 20, 42),
(80000, 70000, 1305032704),
(80000, 70000, 1),
(2, 1, 1),
(10000, 3, 80000),
(0, 3, 0),
(6, 3, 2),
(6, 3, 7),
(6, 3, 5),
(0, 0xffffffff, 32),
(3 << 13, 1 << 5, 3),
];
#[test]
fn test_random_math() {
let tests = [
(20u32, 22u32, 42u32),
(70000, 80000, 1305032704),
(70000, 80000, 1),
(1, 2, 1),
(3, 10000, 196608),
(3, 0, 3),
(3, 6, 2),
(3, 6, 7),
(3, 6, 5),
(0, 0xffffffff, 32),
(3 << 13, 1 << 5, 3),
(22, 20, 42),
(80000, 70000, 1305032704),
(80000, 70000, 1),
(2, 1, 1),
(10000, 3, 80000),
(0, 3, 0),
(6, 3, 2),
(6, 3, 7),
(6, 3, 5),
(0, 0xffffffff, 32),
(3 << 13, 1 << 5, 3),
];
for (i, &(a, b, expected)) in tests.iter().enumerate() {
assert_eq!(
math(a, b, i as u32),
expected,
);
}
}
for (i, &(a, b, expected)) in tests.iter().enumerate() {
assert_eq!(math(a, b, i as u32), expected,);
}
}
#[test]
fn test_keccak_256() {
let expected = "5dd431e5fbc604f499bfa0232f45f8f142d0ff5178f539e5a7800bf0643697af";
assert_eq!(
keccak_f800_long([0; 32], 0, [0; 8]),
h256(expected),
);
}
#[test]
fn test_keccak_256() {
let expected = "5dd431e5fbc604f499bfa0232f45f8f142d0ff5178f539e5a7800bf0643697af";
assert_eq!(keccak_f800_long([0; 32], 0, [0; 8]), h256(expected),);
}
#[test]
fn test_keccak_64() {
let expected: u64 = 0x5dd431e5fbc604f4;
assert_eq!(
keccak_f800_short([0; 32], 0, [0; 8]),
expected,
);
}
#[test]
fn test_keccak_64() {
let expected: u64 = 0x5dd431e5fbc604f4;
assert_eq!(keccak_f800_short([0; 32], 0, [0; 8]), expected,);
}
#[test]
fn test_progpow_hash() {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
let c_dag = generate_cdag(cache.as_ref());
#[test]
fn test_progpow_hash() {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.into_path(), 0);
let c_dag = generate_cdag(cache.as_ref());
let header_hash = [0; 32];
let header_hash = [0; 32];
let (digest, result) = progpow(
header_hash,
0,
0,
cache.as_ref(),
&c_dag,
);
let (digest, result) = progpow(header_hash, 0, 0, cache.as_ref(), &c_dag);
let expected_digest = FromHex::from_hex("63155f732f2bf556967f906155b510c917e48e99685ead76ea83f4eca03ab12b").unwrap();
let expected_result = FromHex::from_hex("faeb1be51075b03a4ff44b335067951ead07a3b078539ace76fd56fc410557a3").unwrap();
let expected_digest =
FromHex::from_hex("63155f732f2bf556967f906155b510c917e48e99685ead76ea83f4eca03ab12b")
.unwrap();
let expected_result =
FromHex::from_hex("faeb1be51075b03a4ff44b335067951ead07a3b078539ace76fd56fc410557a3")
.unwrap();
assert_eq!(
digest.to_vec(),
expected_digest,
);
assert_eq!(digest.to_vec(), expected_digest,);
assert_eq!(
result.to_vec(),
expected_result,
);
}
assert_eq!(result.to_vec(), expected_result,);
}
#[test]
fn test_progpow_testvectors() {
struct ProgpowTest {
block_number: u64,
header_hash: H256,
nonce: u64,
mix_hash: H256,
final_hash: H256,
}
#[test]
fn test_progpow_testvectors() {
struct ProgpowTest {
block_number: u64,
header_hash: H256,
nonce: u64,
mix_hash: H256,
final_hash: H256,
}
let tests: Vec<VecDeque<Value>> =
serde_json::from_slice(include_bytes!("../res/progpow_testvectors.json")).unwrap();
let tests: Vec<VecDeque<Value>> =
serde_json::from_slice(include_bytes!("../res/progpow_testvectors.json")).unwrap();
let tests: Vec<ProgpowTest> = tests.into_iter().map(|mut test: VecDeque<Value>| {
assert!(test.len() == 5);
let tests: Vec<ProgpowTest> = tests
.into_iter()
.map(|mut test: VecDeque<Value>| {
assert!(test.len() == 5);
let block_number: u64 = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let header_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let nonce: String = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let mix_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let final_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let block_number: u64 = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let header_hash: String =
serde_json::from_value(test.pop_front().unwrap()).unwrap();
let nonce: String = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let mix_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap();
let final_hash: String = serde_json::from_value(test.pop_front().unwrap()).unwrap();
ProgpowTest {
block_number,
header_hash: h256(&header_hash),
nonce: u64::from_str_radix(&nonce, 16).unwrap(),
mix_hash: h256(&mix_hash),
final_hash: h256(&final_hash),
}
}).collect();
ProgpowTest {
block_number,
header_hash: h256(&header_hash),
nonce: u64::from_str_radix(&nonce, 16).unwrap(),
mix_hash: h256(&mix_hash),
final_hash: h256(&final_hash),
}
})
.collect();
for test in tests {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.path().to_owned(), test.block_number);
let c_dag = generate_cdag(cache.as_ref());
for test in tests {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory, u64::max_value());
let tempdir = TempDir::new("").unwrap();
let cache = builder.new_cache(tempdir.path().to_owned(), test.block_number);
let c_dag = generate_cdag(cache.as_ref());
let (digest, result) = progpow(
test.header_hash,
test.nonce,
test.block_number,
cache.as_ref(),
&c_dag,
);
let (digest, result) = progpow(
test.header_hash,
test.nonce,
test.block_number,
cache.as_ref(),
&c_dag,
);
assert_eq!(digest, test.final_hash);
assert_eq!(result, test.mix_hash);
}
}
assert_eq!(digest, test.final_hash);
assert_eq!(result, test.mix_hash);
}
}
}

View File

@@ -14,89 +14,97 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use shared;
use keccak::{keccak_256, H256};
use shared;
use std::cell::Cell;
#[derive(Default)]
pub struct SeedHashCompute {
prev_epoch: Cell<u64>,
prev_seedhash: Cell<H256>,
prev_epoch: Cell<u64>,
prev_seedhash: Cell<H256>,
}
impl SeedHashCompute {
#[inline]
fn reset_cache(&self) {
self.prev_epoch.set(0);
self.prev_seedhash.set([0u8; 32]);
}
#[inline]
fn reset_cache(&self) {
self.prev_epoch.set(0);
self.prev_seedhash.set([0u8; 32]);
}
#[inline]
pub fn hash_block_number(&self, block_number: u64) -> H256 {
self.hash_epoch(shared::epoch(block_number))
}
#[inline]
pub fn hash_block_number(&self, block_number: u64) -> H256 {
self.hash_epoch(shared::epoch(block_number))
}
#[inline]
pub fn hash_epoch(&self, epoch: u64) -> H256 {
if epoch < self.prev_epoch.get() {
// can't build on previous hash if requesting an older block
self.reset_cache();
}
if epoch > self.prev_epoch.get() {
let seed_hash = SeedHashCompute::resume_compute_seedhash(
self.prev_seedhash.get(),
self.prev_epoch.get(),
epoch,
);
self.prev_seedhash.set(seed_hash);
self.prev_epoch.set(epoch);
}
self.prev_seedhash.get()
}
#[inline]
pub fn hash_epoch(&self, epoch: u64) -> H256 {
if epoch < self.prev_epoch.get() {
// can't build on previous hash if requesting an older block
self.reset_cache();
}
if epoch > self.prev_epoch.get() {
let seed_hash = SeedHashCompute::resume_compute_seedhash(
self.prev_seedhash.get(),
self.prev_epoch.get(),
epoch,
);
self.prev_seedhash.set(seed_hash);
self.prev_epoch.set(epoch);
}
self.prev_seedhash.get()
}
#[inline]
pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 {
for _ in start_epoch..end_epoch {
keccak_256::inplace(&mut hash);
}
hash
}
#[inline]
pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 {
for _ in start_epoch..end_epoch {
keccak_256::inplace(&mut hash);
}
hash
}
}
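// Illustrative sketch (not part of the diff): the epoch seed is keccak-256
// applied `epoch` times to 32 zero bytes, which is why `hash_epoch` can resume
// from the previously cached epoch instead of recomputing from scratch. The
// `keccak_256` parameter stands in for any 32-byte keccak-256 implementation.
fn seed_for_epoch(epoch: u64, keccak_256: impl Fn(&[u8; 32]) -> [u8; 32]) -> [u8; 32] {
    let mut hash = [0u8; 32];
    for _ in 0..epoch {
        hash = keccak_256(&hash);
    }
    hash
}
// For any e1 <= e2, seed_for_epoch(e2, ..) equals
// resume_compute_seedhash(seed_for_epoch(e1, ..), e1, e2).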
#[cfg(test)]
mod tests {
use super::SeedHashCompute;
use super::SeedHashCompute;
#[test]
fn test_seed_compute_once() {
let seed_compute = SeedHashCompute::default();
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_once() {
let seed_compute = SeedHashCompute::default();
let hash = [
241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253,
147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162,
];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_zero() {
let seed_compute = SeedHashCompute::default();
assert_eq!(seed_compute.hash_block_number(0), [0u8; 32]);
}
#[test]
fn test_seed_compute_zero() {
let seed_compute = SeedHashCompute::default();
assert_eq!(seed_compute.hash_block_number(0), [0u8; 32]);
}
#[test]
fn test_seed_compute_after_older() {
let seed_compute = SeedHashCompute::default();
// calculating an older value first shouldn't affect the result
let _ = seed_compute.hash_block_number(50000);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_after_newer() {
let seed_compute = SeedHashCompute::default();
// calculating a newer value first shouldn't affect the result
let _ = seed_compute.hash_block_number(972764);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_after_older() {
let seed_compute = SeedHashCompute::default();
// calculating an older value first shouldn't affect the result
let _ = seed_compute.hash_block_number(50000);
let hash = [
241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253,
147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162,
];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_after_newer() {
let seed_compute = SeedHashCompute::default();
// calculating a newer value first shouldn't affect the result
let _ = seed_compute.hash_block_number(972764);
let hash = [
241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253,
147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162,
];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
}

View File

@@ -31,38 +31,39 @@ pub const NODE_WORDS: usize = NODE_BYTES / 4;
pub const NODE_BYTES: usize = 64;
pub fn epoch(block_number: u64) -> u64 {
block_number / ETHASH_EPOCH_LENGTH
block_number / ETHASH_EPOCH_LENGTH
}
static CHARS: &'static [u8] = b"0123456789abcdef";
pub fn to_hex(bytes: &[u8]) -> String {
let mut v = Vec::with_capacity(bytes.len() * 2);
for &byte in bytes.iter() {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
}
let mut v = Vec::with_capacity(bytes.len() * 2);
for &byte in bytes.iter() {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
}
unsafe { String::from_utf8_unchecked(v) }
unsafe { String::from_utf8_unchecked(v) }
}
pub fn get_cache_size(block_number: u64) -> usize {
// TODO: Memoise
let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - NODE_BYTES as u64;
while !is_prime(sz / NODE_BYTES as u64) {
sz = sz - 2 * NODE_BYTES as u64;
}
sz as usize
// TODO: Memoise
let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - NODE_BYTES as u64;
while !is_prime(sz / NODE_BYTES as u64) {
sz = sz - 2 * NODE_BYTES as u64;
}
sz as usize
}
pub fn get_data_size(block_number: u64) -> usize {
// TODO: Memoise
let mut sz: u64 = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - ETHASH_MIX_BYTES as u64;
while !is_prime(sz / ETHASH_MIX_BYTES as u64) {
sz = sz - 2 * ETHASH_MIX_BYTES as u64;
}
sz as usize
// TODO: Memoise
let mut sz: u64 =
DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - ETHASH_MIX_BYTES as u64;
while !is_prime(sz / ETHASH_MIX_BYTES as u64) {
sz = sz - 2 * ETHASH_MIX_BYTES as u64;
}
sz as usize
}
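// Illustrative sketch (not part of the diff): both size functions above start
// from an epoch-scaled upper bound and step down in `unit`-sized increments
// until the number of units is prime. `is_prime_naive` is a trial-division
// stand-in for the crate's own primality check.
fn is_prime_naive(n: u64) -> bool {
    if n < 2 {
        return false;
    }
    let mut d = 2;
    while d * d <= n {
        if n % d == 0 {
            return false;
        }
        d += 1;
    }
    true
}

fn largest_prime_sized(init: u64, growth: u64, epoch: u64, unit: u64) -> u64 {
    let mut sz = init + growth * epoch - unit;
    while !is_prime_naive(sz / unit) {
        sz -= 2 * unit;
    }
    sz
}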
pub type NodeBytes = [u8; NODE_BYTES];
@@ -100,15 +101,19 @@ static_assert_size_eq!(Node, NodeBytes, NodeWords, NodeDwords);
#[repr(C)]
pub union Node {
pub dwords: NodeDwords,
pub words: NodeWords,
pub bytes: NodeBytes,
pub dwords: NodeDwords,
pub words: NodeWords,
pub bytes: NodeBytes,
}
impl Clone for Node {
fn clone(&self) -> Self {
unsafe { Node { bytes: *&self.bytes } }
}
fn clone(&self) -> Self {
unsafe {
Node {
bytes: *&self.bytes,
}
}
}
}
// We use `inline(always)` because I was experiencing a 100% slowdown and `perf` showed that these
@@ -117,33 +122,33 @@ impl Clone for Node {
// performance regression. It's not caused by the `debug_assert_eq!` either, your guess is as good
// as mine.
impl Node {
#[inline(always)]
pub fn as_bytes(&self) -> &NodeBytes {
unsafe { &self.bytes }
}
#[inline(always)]
pub fn as_bytes(&self) -> &NodeBytes {
unsafe { &self.bytes }
}
#[inline(always)]
pub fn as_bytes_mut(&mut self) -> &mut NodeBytes {
unsafe { &mut self.bytes }
}
#[inline(always)]
pub fn as_bytes_mut(&mut self) -> &mut NodeBytes {
unsafe { &mut self.bytes }
}
#[inline(always)]
pub fn as_words(&self) -> &NodeWords {
unsafe { &self.words }
}
#[inline(always)]
pub fn as_words(&self) -> &NodeWords {
unsafe { &self.words }
}
#[inline(always)]
pub fn as_words_mut(&mut self) -> &mut NodeWords {
unsafe { &mut self.words }
}
#[inline(always)]
pub fn as_words_mut(&mut self) -> &mut NodeWords {
unsafe { &mut self.words }
}
#[inline(always)]
pub fn as_dwords(&self) -> &NodeDwords {
unsafe { &self.dwords }
}
#[inline(always)]
pub fn as_dwords(&self) -> &NodeDwords {
unsafe { &self.dwords }
}
#[inline(always)]
pub fn as_dwords_mut(&mut self) -> &mut NodeDwords {
unsafe { &mut self.dwords }
}
#[inline(always)]
pub fn as_dwords_mut(&mut self) -> &mut NodeDwords {
unsafe { &mut self.dwords }
}
}
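// Illustrative sketch (not part of the diff): the accessors above are thin
// wrappers around a `#[repr(C)]` union that reinterprets the same 64 bytes as
// byte, u32 and u64 views. A minimal standalone analogue:
#[repr(C)]
union TinyNode {
    bytes: [u8; 8],
    words: [u32; 2],
}

fn tiny_node_words() -> (u32, u32) {
    let node = TinyNode {
        bytes: [1, 0, 0, 0, 2, 0, 0, 0],
    };
    // Reading either view is sound because every bit pattern is valid for both;
    // the numeric values are endianness-dependent (1 and 2 on little-endian).
    unsafe { (node.words[0], node.words[1]) }
}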

View File

@@ -19,129 +19,126 @@ extern crate criterion;
#[macro_use]
extern crate lazy_static;
extern crate ethcore_builtin;
extern crate ethcore;
extern crate ethcore_builtin;
extern crate ethereum_types;
extern crate parity_bytes as bytes;
extern crate rustc_hex;
use criterion::{Criterion, Bencher};
use bytes::BytesRef;
use criterion::{Bencher, Criterion};
use ethcore::{ethereum::new_byzantium_test_machine, machine::EthereumMachine};
use ethcore_builtin::Builtin;
use ethcore::machine::EthereumMachine;
use ethereum_types::U256;
use ethcore::ethereum::new_byzantium_test_machine;
use rustc_hex::FromHex;
lazy_static! {
static ref BYZANTIUM_MACHINE: EthereumMachine = new_byzantium_test_machine();
static ref BYZANTIUM_MACHINE: EthereumMachine = new_byzantium_test_machine();
}
struct BuiltinBenchmark<'a> {
builtin: &'a Builtin,
input: Vec<u8>,
expected: Vec<u8>,
builtin: &'a Builtin,
input: Vec<u8>,
expected: Vec<u8>,
}
impl<'a> BuiltinBenchmark<'a> {
fn new(builtin_address: &'static str, input: &str, expected: &str) -> BuiltinBenchmark<'a> {
let builtins = BYZANTIUM_MACHINE.builtins();
fn new(builtin_address: &'static str, input: &str, expected: &str) -> BuiltinBenchmark<'a> {
let builtins = BYZANTIUM_MACHINE.builtins();
let builtin = builtins.get(&builtin_address.into()).unwrap().clone();
let input = FromHex::from_hex(input).unwrap();
let expected = FromHex::from_hex(expected).unwrap();
let builtin = builtins.get(&builtin_address.into()).unwrap().clone();
let input = FromHex::from_hex(input).unwrap();
let expected = FromHex::from_hex(expected).unwrap();
BuiltinBenchmark {
builtin, input, expected
}
}
BuiltinBenchmark {
builtin,
input,
expected,
}
}
fn run(&self, b: &mut Bencher) {
let mut output = vec![0; self.expected.len()];
fn run(&self, b: &mut Bencher) {
let mut output = vec![0; self.expected.len()];
b.iter(|| {
self.builtin.execute(&self.input, &mut BytesRef::Fixed(&mut output)).unwrap();
});
b.iter(|| {
self.builtin
.execute(&self.input, &mut BytesRef::Fixed(&mut output))
.unwrap();
});
assert_eq!(self.expected[..], output[..]);
}
assert_eq!(self.expected[..], output[..]);
}
}
fn bench(
id: &str,
builtin_address: &'static str,
input: &str,
expected: &str,
b: &mut Criterion,
) {
let bench = BuiltinBenchmark::new(builtin_address, input, expected);
b.bench_function(id, move |b| bench.run(b));
fn bench(id: &str, builtin_address: &'static str, input: &str, expected: &str, b: &mut Criterion) {
let bench = BuiltinBenchmark::new(builtin_address, input, expected);
b.bench_function(id, move |b| bench.run(b));
}
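// Illustrative usage (not part of the diff): a new case only needs an id, the
// builtin's address, the input and the expected output, plus an entry in the
// `criterion_group!` list below. The hex strings here are placeholders, not
// real benchmark vectors.
fn identity_tiny(b: &mut Criterion) {
    bench(
        "identity_tiny",
        "0000000000000000000000000000000000000004", // identity
        "deadbeef",
        "deadbeef",
        b,
    );
}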
criterion_group!(
builtin,
ecrecover,
sha256,
ripemd,
identity,
modexp_eip_example1,
modexp_eip_example2,
modexp_nagydani_1_square,
modexp_nagydani_1_qube,
modexp_nagydani_1_pow0x10001,
modexp_nagydani_2_square,
modexp_nagydani_2_qube,
modexp_nagydani_2_pow0x10001,
modexp_nagydani_3_square,
modexp_nagydani_3_qube,
modexp_nagydani_3_pow0x10001,
modexp_nagydani_4_square,
modexp_nagydani_4_qube,
modexp_nagydani_4_pow0x10001,
modexp_nagydani_5_square,
modexp_nagydani_5_qube,
modexp_nagydani_5_pow0x10001,
alt_bn128_add_chfast1,
alt_bn128_add_chfast2,
alt_bn128_add_cdetrio1,
alt_bn128_add_cdetrio2,
alt_bn128_add_cdetrio3,
alt_bn128_add_cdetrio4,
alt_bn128_add_cdetrio5,
alt_bn128_add_cdetrio6,
alt_bn128_add_cdetrio7,
alt_bn128_add_cdetrio8,
alt_bn128_add_cdetrio9,
alt_bn128_add_cdetrio10,
alt_bn128_add_cdetrio11,
alt_bn128_add_cdetrio12,
alt_bn128_add_cdetrio13,
alt_bn128_add_cdetrio14,
alt_bn128_mul_chfast1,
alt_bn128_mul_chfast2,
alt_bn128_mul_chfast3,
alt_bn128_mul_cdetrio1,
alt_bn128_mul_cdetrio6,
alt_bn128_mul_cdetrio11,
alt_bn128_pairing_jeff1,
alt_bn128_pairing_jeff2,
alt_bn128_pairing_jeff3,
alt_bn128_pairing_jeff4,
alt_bn128_pairing_jeff5,
alt_bn128_pairing_jeff6,
alt_bn128_pairing_empty_data,
alt_bn128_pairing_one_point,
alt_bn128_pairing_two_point_match_2,
alt_bn128_pairing_two_point_match_3,
alt_bn128_pairing_two_point_match_4,
alt_bn128_pairing_ten_point_match_1,
alt_bn128_pairing_ten_point_match_2,
alt_bn128_pairing_ten_point_match_3
builtin,
ecrecover,
sha256,
ripemd,
identity,
modexp_eip_example1,
modexp_eip_example2,
modexp_nagydani_1_square,
modexp_nagydani_1_qube,
modexp_nagydani_1_pow0x10001,
modexp_nagydani_2_square,
modexp_nagydani_2_qube,
modexp_nagydani_2_pow0x10001,
modexp_nagydani_3_square,
modexp_nagydani_3_qube,
modexp_nagydani_3_pow0x10001,
modexp_nagydani_4_square,
modexp_nagydani_4_qube,
modexp_nagydani_4_pow0x10001,
modexp_nagydani_5_square,
modexp_nagydani_5_qube,
modexp_nagydani_5_pow0x10001,
alt_bn128_add_chfast1,
alt_bn128_add_chfast2,
alt_bn128_add_cdetrio1,
alt_bn128_add_cdetrio2,
alt_bn128_add_cdetrio3,
alt_bn128_add_cdetrio4,
alt_bn128_add_cdetrio5,
alt_bn128_add_cdetrio6,
alt_bn128_add_cdetrio7,
alt_bn128_add_cdetrio8,
alt_bn128_add_cdetrio9,
alt_bn128_add_cdetrio10,
alt_bn128_add_cdetrio11,
alt_bn128_add_cdetrio12,
alt_bn128_add_cdetrio13,
alt_bn128_add_cdetrio14,
alt_bn128_mul_chfast1,
alt_bn128_mul_chfast2,
alt_bn128_mul_chfast3,
alt_bn128_mul_cdetrio1,
alt_bn128_mul_cdetrio6,
alt_bn128_mul_cdetrio11,
alt_bn128_pairing_jeff1,
alt_bn128_pairing_jeff2,
alt_bn128_pairing_jeff3,
alt_bn128_pairing_jeff4,
alt_bn128_pairing_jeff5,
alt_bn128_pairing_jeff6,
alt_bn128_pairing_empty_data,
alt_bn128_pairing_one_point,
alt_bn128_pairing_two_point_match_2,
alt_bn128_pairing_two_point_match_3,
alt_bn128_pairing_two_point_match_4,
alt_bn128_pairing_ten_point_match_1,
alt_bn128_pairing_ten_point_match_2,
alt_bn128_pairing_ten_point_match_3
);
criterion_main!(builtin);
fn ecrecover(b: &mut Criterion) {
bench(
bench(
"ecrecover",
"0000000000000000000000000000000000000001", // ecrecover
"38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
@@ -151,7 +148,7 @@ fn ecrecover(b: &mut Criterion) {
}
fn sha256(b: &mut Criterion) {
bench(
bench(
"sha256",
"0000000000000000000000000000000000000002", // sha256
"38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
@@ -161,7 +158,7 @@ fn sha256(b: &mut Criterion) {
}
fn ripemd(b: &mut Criterion) {
bench(
bench(
"ripemd",
"0000000000000000000000000000000000000003", // ripemd
"38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
@@ -171,7 +168,7 @@ fn ripemd(b: &mut Criterion) {
}
fn identity(b: &mut Criterion) {
bench(
bench(
"identity",
"0000000000000000000000000000000000000004", // identity
"38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
@@ -181,7 +178,7 @@ fn identity(b: &mut Criterion) {
}
fn modexp_eip_example1(b: &mut Criterion) {
bench(
bench(
"modexp_eip_example1",
"0000000000000000000000000000000000000005", // modexp
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002003fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
@@ -191,7 +188,7 @@ fn modexp_eip_example1(b: &mut Criterion) {
}
fn modexp_eip_example2(b: &mut Criterion) {
bench(
bench(
"modexp_eip_example2",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
@@ -201,7 +198,7 @@ fn modexp_eip_example2(b: &mut Criterion) {
}
fn modexp_nagydani_1_square(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_1_square",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
@@ -211,7 +208,7 @@ fn modexp_nagydani_1_square(b: &mut Criterion) {
}
fn modexp_nagydani_1_qube(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_1_qube",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb503fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
@@ -221,7 +218,7 @@ fn modexp_nagydani_1_qube(b: &mut Criterion) {
}
fn modexp_nagydani_1_pow0x10001(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_1_pow0x10001",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5010001fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
@@ -231,7 +228,7 @@ fn modexp_nagydani_1_pow0x10001(b: &mut Criterion) {
}
fn modexp_nagydani_2_square(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_2_square",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5102e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
@@ -241,7 +238,7 @@ fn modexp_nagydani_2_square(b: &mut Criterion) {
}
fn modexp_nagydani_2_qube(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_2_qube",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5103e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
@@ -251,7 +248,7 @@ fn modexp_nagydani_2_qube(b: &mut Criterion) {
}
fn modexp_nagydani_2_pow0x10001(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_2_pow0x10001",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51010001e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
@@ -261,7 +258,7 @@ fn modexp_nagydani_2_pow0x10001(b: &mut Criterion) {
}
fn modexp_nagydani_3_square(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_3_square",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb02d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
@@ -271,7 +268,7 @@ fn modexp_nagydani_3_square(b: &mut Criterion) {
}
fn modexp_nagydani_3_qube(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_3_qube",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb03d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
@@ -281,7 +278,7 @@ fn modexp_nagydani_3_qube(b: &mut Criterion) {
}
fn modexp_nagydani_3_pow0x10001(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_3_pow0x10001",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb010001d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
@@ -291,7 +288,7 @@ fn modexp_nagydani_3_pow0x10001(b: &mut Criterion) {
}
fn modexp_nagydani_4_square(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_4_square",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f",
@@ -301,7 +298,7 @@ fn modexp_nagydani_4_square(b: &mut Criterion) {
}
fn modexp_nagydani_4_qube(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_4_qube",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8103df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f",
@@ -311,7 +308,7 @@ fn modexp_nagydani_4_qube(b: &mut Criterion) {
}
fn modexp_nagydani_4_pow0x10001(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_4_pow0x10001",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81010001df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f",
@@ -321,7 +318,7 @@ fn modexp_nagydani_4_pow0x10001(b: &mut Criterion) {
}
fn modexp_nagydani_5_square(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_5_square",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf02e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe
0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad",
@ -331,7 +328,7 @@ fn modexp_nagydani_5_square(b: &mut Criterion) {
}
fn modexp_nagydani_5_qube(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_5_qube",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf03e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe
0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad",
@@ -341,7 +338,7 @@ fn modexp_nagydani_5_qube(b: &mut Criterion) {
}
fn modexp_nagydani_5_pow0x10001(b: &mut Criterion) {
bench(
bench(
"modexp_nagydani_5_pow0x10001",
"0000000000000000000000000000000000000005", // modexp
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf010001e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f
3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad",
@ -351,7 +348,7 @@ fn modexp_nagydani_5_pow0x10001(b: &mut Criterion) {
}
fn alt_bn128_add_chfast1(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_chfast1",
"0000000000000000000000000000000000000006", // alt_bn128_add
"18b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f3726607c2b7f58a84bd6145f00c9c2bc0bb1a187f20ff2c92963a88019e7c6a014eed06614e20c147e940f2d70da3f74c9a17df361706a4485c742bd6788478fa17d7",
@ -361,7 +358,7 @@ fn alt_bn128_add_chfast1(b: &mut Criterion) {
}
fn alt_bn128_add_chfast2(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_chfast2",
"0000000000000000000000000000000000000006", // alt_bn128_add
"2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c91518b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f37266",
@ -371,7 +368,7 @@ fn alt_bn128_add_chfast2(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio1(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio1",
"0000000000000000000000000000000000000006", // alt_bn128_add
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -381,7 +378,7 @@ fn alt_bn128_add_cdetrio1(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio2(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio2",
"0000000000000000000000000000000000000006", // alt_bn128_add
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -391,7 +388,7 @@ fn alt_bn128_add_cdetrio2(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio3(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio3",
"0000000000000000000000000000000000000006", // alt_bn128_add
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -401,7 +398,7 @@ fn alt_bn128_add_cdetrio3(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio4(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio4",
"0000000000000000000000000000000000000006", // alt_bn128_add
"",
@ -411,7 +408,7 @@ fn alt_bn128_add_cdetrio4(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio5(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio5",
"0000000000000000000000000000000000000006", // alt_bn128_add
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -421,7 +418,7 @@ fn alt_bn128_add_cdetrio5(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio6(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio6",
"0000000000000000000000000000000000000006", // alt_bn128_add
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -431,7 +428,7 @@ fn alt_bn128_add_cdetrio6(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio7(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio7",
"0000000000000000000000000000000000000006", // alt_bn128_add
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -441,7 +438,7 @@ fn alt_bn128_add_cdetrio7(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio8(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio8",
"0000000000000000000000000000000000000006", // alt_bn128_add
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -451,7 +448,7 @@ fn alt_bn128_add_cdetrio8(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio9(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio9",
"0000000000000000000000000000000000000006", // alt_bn128_add
"0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -461,7 +458,7 @@ fn alt_bn128_add_cdetrio9(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio10(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio10",
"0000000000000000000000000000000000000006", // alt_bn128_add
"000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -471,7 +468,7 @@ fn alt_bn128_add_cdetrio10(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio11(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio11",
"0000000000000000000000000000000000000006", // alt_bn128_add
"0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -481,7 +478,7 @@ fn alt_bn128_add_cdetrio11(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio12(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio12",
"0000000000000000000000000000000000000006", // alt_bn128_add
"000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -491,7 +488,7 @@ fn alt_bn128_add_cdetrio12(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio13(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio13",
"0000000000000000000000000000000000000006", // alt_bn128_add
"17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98",
@ -501,7 +498,7 @@ fn alt_bn128_add_cdetrio13(b: &mut Criterion) {
}
fn alt_bn128_add_cdetrio14(b: &mut Criterion) {
bench(
bench(
"alt_bn128_add_cdetrio14",
"0000000000000000000000000000000000000006", // alt_bn128_add
"17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa92e83f8d734803fc370eba25ed1f6b8768bd6d83887b87165fc2434fe11a830cb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
@ -511,7 +508,7 @@ fn alt_bn128_add_cdetrio14(b: &mut Criterion) {
}
fn alt_bn128_mul_chfast1(b: &mut Criterion) {
bench(
bench(
"alt_bn128_mul_chfast1",
"0000000000000000000000000000000000000007", // alt_bn128_mul
"2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb20400000000000000000000000000000000000000000000000011138ce750fa15c2",
@ -521,7 +518,7 @@ fn alt_bn128_mul_chfast1(b: &mut Criterion) {
}
fn alt_bn128_mul_chfast2(b: &mut Criterion) {
bench(
bench(
"alt_bn128_mul_chfast2",
"0000000000000000000000000000000000000007", // alt_bn128_mul
"070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46",
@ -531,7 +528,7 @@ fn alt_bn128_mul_chfast2(b: &mut Criterion) {
}
fn alt_bn128_mul_chfast3(b: &mut Criterion) {
bench(
bench(
"alt_bn128_mul_chfast3",
"0000000000000000000000000000000000000007", // alt_bn128_mul
"025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e183227397098d014dc2822db40c0ac2ecbc0b548b438e5469e10460b6c3e7ea3",
@ -541,7 +538,7 @@ fn alt_bn128_mul_chfast3(b: &mut Criterion) {
}
fn alt_bn128_mul_cdetrio1(b: &mut Criterion) {
bench(
bench(
"alt_bn128_mul_cdetrio1",
"0000000000000000000000000000000000000007", // alt_bn128_mul
"1a87b0584ce92f4593d161480614f2989035225609f08058ccfa3d0f940febe31a2f3c951f6dadcc7ee9007dff81504b0fcd6d7cf59996efdc33d92bf7f9f8f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
@ -551,7 +548,7 @@ fn alt_bn128_mul_cdetrio1(b: &mut Criterion) {
}
fn alt_bn128_mul_cdetrio6(b: &mut Criterion) {
bench(
bench(
"alt_bn128_mul_cdetrio6",
"0000000000000000000000000000000000000007", // alt_bn128_mul
"17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
@ -561,7 +558,7 @@ fn alt_bn128_mul_cdetrio6(b: &mut Criterion) {
}
fn alt_bn128_mul_cdetrio11(b: &mut Criterion) {
bench(
bench(
"alt_bn128_mul_cdetrio11",
"0000000000000000000000000000000000000007", // alt_bn128_mul
"039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
@ -571,7 +568,7 @@ fn alt_bn128_mul_cdetrio11(b: &mut Criterion) {
}
fn alt_bn128_pairing_jeff1(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_jeff1",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -581,7 +578,7 @@ fn alt_bn128_pairing_jeff1(b: &mut Criterion) {
}
fn alt_bn128_pairing_jeff2(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_jeff2",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"2eca0c7238bf16e83e7a1e6c5d49540685ff51380f309842a98561558019fc0203d3260361bb8451de5ff5ecd17f010ff22f5c31cdf184e9020b06fa5997db841213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f06967a1237ebfeca9aaae0d6d0bab8e28c198c5a339ef8a2407e31cdac516db922160fa257a5fd5b280642ff47b65eca77e626cb685c84fa6d3b6882a283ddd1198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -591,7 +588,7 @@ fn alt_bn128_pairing_jeff2(b: &mut Criterion) {
}
fn alt_bn128_pairing_jeff3(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_jeff3",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"0f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd216da2f5cb6be7a0aa72c440c53c9bbdfec6c36c7d515536431b3a865468acbba2e89718ad33c8bed92e210e81d1853435399a271913a6520736a4729cf0d51eb01a9e2ffa2e92599b68e44de5bcf354fa2642bd4f26b259daa6f7ce3ed57aeb314a9a87b789a58af499b314e13c3d65bede56c07ea2d418d6874857b70763713178fb49a2d6cd347dc58973ff49613a20757d0fcc22079f9abd10c3baee245901b9e027bd5cfc2cb5db82d4dc9677ac795ec500ecd47deee3b5da006d6d049b811d7511c78158de484232fc68daf8a45cf217d1c2fae693ff5871e8752d73b21198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -601,7 +598,7 @@ fn alt_bn128_pairing_jeff3(b: &mut Criterion) {
}
fn alt_bn128_pairing_jeff4(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_jeff4",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"2f2ea0b3da1e8ef11914acf8b2e1b32d99df51f5f4f206fc6b947eae860eddb6068134ddb33dc888ef446b648d72338684d678d2eb2371c61a50734d78da4b7225f83c8b6ab9de74e7da488ef02645c5a16a6652c3c71a15dc37fe3a5dcb7cb122acdedd6308e3bb230d226d16a105295f523a8a02bfc5e8bd2da135ac4c245d065bbad92e7c4e31bf3757f1fe7362a63fbfee50e7dc68da116e67d600d9bf6806d302580dc0661002994e7cd3a7f224e7ddc27802777486bf80f40e4ca3cfdb186bac5188a98c45e6016873d107f5cd131f3a3e339d0375e58bd6219347b008122ae2b09e539e152ec5364e7e2204b03d11d3caa038bfc7cd499f8176aacbee1f39e4e4afc4bc74790a4a028aff2c3d2538731fb755edefd8cb48d6ea589b5e283f150794b6736f670d6a1033f9b46c6f5204f50813eb85c8dc4b59db1c5d39140d97ee4d2b36d99bc49974d18ecca3e7ad51011956051b464d9e27d46cc25e0764bb98575bd466d32db7b15f582b2d5c452b36aa394b789366e5e3ca5aabd415794ab061441e51d01e94640b7e3084a07e02c78cf3103c542bc5b298669f211b88da1679b0b64a63b7e0e7bfe52aae524f73a55be7fe70c7e9bfc94b4cf0da1213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f",
@ -611,7 +608,7 @@ fn alt_bn128_pairing_jeff4(b: &mut Criterion) {
}
fn alt_bn128_pairing_jeff5(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_jeff5",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"20a754d2071d4d53903e3b31a7e98ad6882d58aec240ef981fdf0a9d22c5926a29c853fcea789887315916bbeb89ca37edb355b4f980c9a12a94f30deeed30211213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f1abb4a25eb9379ae96c84fff9f0540abcfc0a0d11aeda02d4f37e4baf74cb0c11073b3ff2cdbb38755f8691ea59e9606696b3ff278acfc098fa8226470d03869217cee0a9ad79a4493b5253e2e4e3a39fc2df38419f230d341f60cb064a0ac290a3d76f140db8418ba512272381446eb73958670f00cf46f1d9e64cba057b53c26f64a8ec70387a13e41430ed3ee4a7db2059cc5fc13c067194bcc0cb49a98552fd72bd9edb657346127da132e5b82ab908f5816c826acb499e22f2412d1a2d70f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd2198a1f162a73261f112401aa2db79c7dab1533c9935c77290a6ce3b191f2318d198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -621,7 +618,7 @@ fn alt_bn128_pairing_jeff5(b: &mut Criterion) {
}
fn alt_bn128_pairing_jeff6(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_jeff6",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c103188585e2364128fe25c70558f1560f4f9350baf3959e603cc91486e110936198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -631,17 +628,17 @@ fn alt_bn128_pairing_jeff6(b: &mut Criterion) {
}
fn alt_bn128_pairing_empty_data(b: &mut Criterion) {
bench(
"alt_bn128_pairing_empty_data",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"",
"0000000000000000000000000000000000000000000000000000000000000001",
b,
);
bench(
"alt_bn128_pairing_empty_data",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"",
"0000000000000000000000000000000000000000000000000000000000000001",
b,
);
}
fn alt_bn128_pairing_one_point(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_one_point",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -651,7 +648,7 @@ fn alt_bn128_pairing_one_point(b: &mut Criterion) {
}
fn alt_bn128_pairing_two_point_match_2(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_two_point_match_2",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d",
@ -661,7 +658,7 @@ fn alt_bn128_pairing_two_point_match_2(b: &mut Criterion) {
}
fn alt_bn128_pairing_two_point_match_3(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_two_point_match_3",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -671,7 +668,7 @@ fn alt_bn128_pairing_two_point_match_3(b: &mut Criterion) {
}
fn alt_bn128_pairing_two_point_match_4(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_two_point_match_4",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75",
@ -681,7 +678,7 @@ fn alt_bn128_pairing_two_point_match_4(b: &mut Criterion) {
}
fn alt_bn128_pairing_ten_point_match_1(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_ten_point_match_1",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000
000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d",
@ -691,7 +688,7 @@ fn alt_bn128_pairing_ten_point_match_1(b: &mut Criterion) {
}
fn alt_bn128_pairing_ten_point_match_2(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_ten_point_match_2",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532e
dac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
@ -701,7 +698,7 @@ fn alt_bn128_pairing_ten_point_match_2(b: &mut Criterion) {
}
fn alt_bn128_pairing_ten_point_match_3(b: &mut Criterion) {
bench(
bench(
"alt_bn128_pairing_ten_point_match_3",
"0000000000000000000000000000000000000008", // alt_bn128_pairing
"105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75",


@ -16,8 +16,7 @@
use ethereum_types::{H256, U256};
use common_types::{encoded, BlockNumber};
use common_types::header::Header;
use common_types::{encoded, header::Header, BlockNumber};
/// Contains information on a best block that is specific to the consensus engine.
///
@ -26,19 +25,19 @@ use common_types::header::Header;
///
/// Sometimes referred to as 'latest block'.
pub struct BestBlock {
/// Best block decoded header.
pub header: Header,
/// Best block uncompressed bytes.
pub block: encoded::Block,
/// Best block total difficulty.
pub total_difficulty: U256,
/// Best block decoded header.
pub header: Header,
/// Best block uncompressed bytes.
pub block: encoded::Block,
/// Best block total difficulty.
pub total_difficulty: U256,
}
/// Best ancient block info. If the blockchain has a gap, this keeps track of where it starts.
#[derive(Default)]
pub struct BestAncientBlock {
/// Best block hash.
pub hash: H256,
/// Best block number.
pub number: BlockNumber,
/// Best block hash.
pub hash: H256,
/// Best block number.
pub number: BlockNumber,
}


@ -14,41 +14,41 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use ethereum_types::{H256, U256};
use common_types::BlockNumber;
use ethereum_types::{H256, U256};
/// Brief info about inserted block.
#[derive(Clone)]
pub struct BlockInfo {
/// Block hash.
pub hash: H256,
/// Block number.
pub number: BlockNumber,
/// Total block difficulty.
pub total_difficulty: U256,
/// Block location in blockchain.
pub location: BlockLocation
/// Block hash.
pub hash: H256,
/// Block number.
pub number: BlockNumber,
/// Total block difficulty.
pub total_difficulty: U256,
/// Block location in blockchain.
pub location: BlockLocation,
}
/// Describes location of newly inserted block.
#[derive(Debug, Clone, PartialEq)]
pub enum BlockLocation {
/// It's part of the canon chain.
CanonChain,
/// It's not a part of the canon chain.
Branch,
/// It's part of the fork which should become canon chain,
/// because its total difficulty is higher than current
/// canon chain difficulty.
BranchBecomingCanonChain(BranchBecomingCanonChainData),
/// It's part of the canon chain.
CanonChain,
/// It's not a part of the canon chain.
Branch,
/// It's part of the fork which should become canon chain,
/// because its total difficulty is higher than current
/// canon chain difficulty.
BranchBecomingCanonChain(BranchBecomingCanonChainData),
}
#[derive(Debug, Clone, PartialEq)]
pub struct BranchBecomingCanonChainData {
/// Hash of the newest common ancestor with old canon chain.
pub ancestor: H256,
/// Hashes of the blocks between ancestor and this block.
pub enacted: Vec<H256>,
/// Hashes of the blocks which were invalidated.
pub retracted: Vec<H256>,
/// Hash of the newest common ancestor with old canon chain.
pub ancestor: H256,
/// Hashes of the blocks between ancestor and this block.
pub enacted: Vec<H256>,
/// Hashes of the blocks which were invalidated.
pub retracted: Vec<H256>,
}

File diff suppressed because it is too large


@ -17,19 +17,19 @@
/// Represents blockchain's in-memory cache size in bytes.
#[derive(Debug)]
pub struct CacheSize {
/// Blocks cache size.
pub blocks: usize,
/// BlockDetails cache size.
pub block_details: usize,
/// Transaction addresses cache size.
pub transaction_addresses: usize,
/// Block receipts size.
pub block_receipts: usize,
/// Blocks cache size.
pub blocks: usize,
/// BlockDetails cache size.
pub block_details: usize,
/// Transaction addresses cache size.
pub transaction_addresses: usize,
/// Block receipts size.
pub block_receipts: usize,
}
impl CacheSize {
/// Total amount used by the cache.
pub fn total(&self) -> usize {
self.blocks + self.block_details + self.transaction_addresses + self.block_receipts
}
/// Total amount used by the cache.
pub fn total(&self) -> usize {
self.blocks + self.block_details + self.transaction_addresses + self.block_receipts
}
}
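For illustration only (not part of this commit): total() is a straight sum of the four component sizes, so a hypothetical CacheSize adds up as follows.

    // Hypothetical byte counts, purely to illustrate CacheSize::total().
    let size = CacheSize {
        blocks: 1024,
        block_details: 256,
        transaction_addresses: 128,
        block_receipts: 64,
    };
    assert_eq!(size.total(), 1_472); // 1024 + 256 + 128 + 64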


@ -19,17 +19,17 @@
/// Blockchain configuration.
#[derive(Debug, PartialEq, Clone)]
pub struct Config {
/// Preferred cache size in bytes.
pub pref_cache_size: usize,
/// Maximum cache size in bytes.
pub max_cache_size: usize,
/// Preferred cache size in bytes.
pub pref_cache_size: usize,
/// Maximum cache size in bytes.
pub max_cache_size: usize,
}
impl Default for Config {
fn default() -> Self {
Config {
pref_cache_size: 1 << 14,
max_cache_size: 1 << 20,
}
}
fn default() -> Self {
Config {
pref_cache_size: 1 << 14,
max_cache_size: 1 << 20,
}
}
}
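As a quick check of the defaults above (illustrative only, not part of the diff): 1 << 14 is 16 KiB for the preferred cache size and 1 << 20 is 1 MiB for the maximum.

    let cfg = Config::default();
    assert_eq!(cfg.pref_cache_size, 16_384);    // 1 << 14 bytes = 16 KiB
    assert_eq!(cfg.max_cache_size, 1_048_576);  // 1 << 20 bytes = 1 MiB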


@ -16,14 +16,16 @@
//! Blockchain generator for tests.
use ethereum_types::{Bloom, H256, U256};
use std::collections::VecDeque;
use ethereum_types::{U256, H256, Bloom};
use common_types::encoded;
use common_types::header::Header;
use common_types::transaction::{SignedTransaction, Transaction, Action};
use common_types::view;
use common_types::views::BlockView;
use common_types::{
encoded,
header::Header,
transaction::{Action, SignedTransaction, Transaction},
view,
views::BlockView,
};
use keccak_hash::keccak;
use rlp::encode;
use rlp_derive::RlpEncodable;
@ -32,247 +34,260 @@ use triehash_ethereum::ordered_trie_root;
/// Helper structure, used for encoding blocks.
#[derive(Default, Clone, RlpEncodable)]
pub struct Block {
/// Block header
pub header: Header,
/// Block transactions
pub transactions: Vec<SignedTransaction>,
/// Block uncles
pub uncles: Vec<Header>
/// Block header
pub header: Header,
/// Block transactions
pub transactions: Vec<SignedTransaction>,
/// Block uncles
pub uncles: Vec<Header>,
}
impl Block {
/// Get a copy of the header
#[inline]
pub fn header(&self) -> Header {
self.header.clone()
}
/// Get a copy of the header
#[inline]
pub fn header(&self) -> Header {
self.header.clone()
}
/// Get block hash
#[inline]
pub fn hash(&self) -> H256 {
view!(BlockView, &self.encoded().raw()).header_view().hash()
}
/// Get block hash
#[inline]
pub fn hash(&self) -> H256 {
view!(BlockView, &self.encoded().raw()).header_view().hash()
}
/// Get block number
#[inline]
pub fn number(&self) -> u64 {
self.header.number()
}
/// Get block number
#[inline]
pub fn number(&self) -> u64 {
self.header.number()
}
/// Get RLP encoding of this block
#[inline]
pub fn encoded(&self) -> encoded::Block {
encoded::Block::new(encode(self))
}
/// Get RLP encoding of this block
#[inline]
pub fn encoded(&self) -> encoded::Block {
encoded::Block::new(encode(self))
}
/// Get block difficulty
#[inline]
pub fn difficulty(&self) -> U256 {
*self.header.difficulty()
}
/// Get block difficulty
#[inline]
pub fn difficulty(&self) -> U256 {
*self.header.difficulty()
}
}
/// Specify block options for generator
#[derive(Debug)]
pub struct BlockOptions {
/// Difficulty
pub difficulty: U256,
/// Set bloom filter
pub bloom: Bloom,
/// Transactions included in blocks
pub transactions: Vec<SignedTransaction>,
/// Difficulty
pub difficulty: U256,
/// Set bloom filter
pub bloom: Bloom,
/// Transactions included in blocks
pub transactions: Vec<SignedTransaction>,
}
impl Default for BlockOptions {
fn default() -> Self {
BlockOptions {
difficulty: 10.into(),
bloom: Bloom::default(),
transactions: Vec::new(),
}
}
fn default() -> Self {
BlockOptions {
difficulty: 10.into(),
bloom: Bloom::default(),
transactions: Vec::new(),
}
}
}
/// Utility to create blocks
#[derive(Clone)]
pub struct BlockBuilder {
blocks: VecDeque<Block>,
blocks: VecDeque<Block>,
}
impl BlockBuilder {
/// Create new BlockBuilder starting at genesis.
pub fn genesis() -> Self {
let mut blocks = VecDeque::with_capacity(1);
blocks.push_back(Block::default());
/// Create new BlockBuilder starting at genesis.
pub fn genesis() -> Self {
let mut blocks = VecDeque::with_capacity(1);
blocks.push_back(Block::default());
BlockBuilder {
blocks,
}
}
BlockBuilder { blocks }
}
/// Add new block with default options.
#[inline]
pub fn add_block(&self) -> Self {
self.add_block_with(|| BlockOptions::default())
}
/// Add new block with default options.
#[inline]
pub fn add_block(&self) -> Self {
self.add_block_with(|| BlockOptions::default())
}
/// Add `count` number of blocks with default options.
#[inline]
pub fn add_blocks(&self, count: usize) -> Self {
self.add_blocks_with(count, || BlockOptions::default())
}
/// Add `count` number of blocks with default options.
#[inline]
pub fn add_blocks(&self, count: usize) -> Self {
self.add_blocks_with(count, || BlockOptions::default())
}
/// Add block with specified options.
#[inline]
pub fn add_block_with<T>(&self, get_metadata: T) -> Self where T: Fn() -> BlockOptions {
self.add_blocks_with(1, get_metadata)
}
/// Add block with specified options.
#[inline]
pub fn add_block_with<T>(&self, get_metadata: T) -> Self
where
T: Fn() -> BlockOptions,
{
self.add_blocks_with(1, get_metadata)
}
/// Add a block with given difficulty
#[inline]
pub fn add_block_with_difficulty<T>(&self, difficulty: T) -> Self where T: Into<U256> {
let difficulty = difficulty.into();
self.add_blocks_with(1, move || BlockOptions {
difficulty,
..Default::default()
})
}
/// Add a block with given difficulty
#[inline]
pub fn add_block_with_difficulty<T>(&self, difficulty: T) -> Self
where
T: Into<U256>,
{
let difficulty = difficulty.into();
self.add_blocks_with(1, move || BlockOptions {
difficulty,
..Default::default()
})
}
/// Add a block with randomly generated transactions.
#[inline]
pub fn add_block_with_random_transactions(&self) -> Self {
// Maximum of ~50 transactions
let count = rand::random::<u8>() as usize / 5;
let transactions = std::iter::repeat_with(|| {
let data_len = rand::random::<u8>();
let data = std::iter::repeat_with(|| rand::random::<u8>())
.take(data_len as usize)
.collect::<Vec<_>>();
Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 100.into(),
data,
}.sign(&keccak("").into(), None)
}).take(count);
/// Add a block with randomly generated transactions.
#[inline]
pub fn add_block_with_random_transactions(&self) -> Self {
// Maximum of ~50 transactions
let count = rand::random::<u8>() as usize / 5;
let transactions = std::iter::repeat_with(|| {
let data_len = rand::random::<u8>();
let data = std::iter::repeat_with(|| rand::random::<u8>())
.take(data_len as usize)
.collect::<Vec<_>>();
Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 100.into(),
data,
}
.sign(&keccak("").into(), None)
})
.take(count);
self.add_block_with_transactions(transactions)
}
self.add_block_with_transactions(transactions)
}
/// Add a block with given transactions.
#[inline]
pub fn add_block_with_transactions<T>(&self, transactions: T) -> Self
where T: IntoIterator<Item = SignedTransaction> {
let transactions = transactions.into_iter().collect::<Vec<_>>();
self.add_blocks_with(1, || BlockOptions {
transactions: transactions.clone(),
..Default::default()
})
}
/// Add a block with given transactions.
#[inline]
pub fn add_block_with_transactions<T>(&self, transactions: T) -> Self
where
T: IntoIterator<Item = SignedTransaction>,
{
let transactions = transactions.into_iter().collect::<Vec<_>>();
self.add_blocks_with(1, || BlockOptions {
transactions: transactions.clone(),
..Default::default()
})
}
/// Add a block with given bloom filter.
#[inline]
pub fn add_block_with_bloom(&self, bloom: Bloom) -> Self {
self.add_blocks_with(1, move || BlockOptions {
bloom,
..Default::default()
})
}
/// Add a block with given bloom filter.
#[inline]
pub fn add_block_with_bloom(&self, bloom: Bloom) -> Self {
self.add_blocks_with(1, move || BlockOptions {
bloom,
..Default::default()
})
}
/// Add a bunch of blocks with given metadata.
pub fn add_blocks_with<T>(&self, count: usize, get_metadata: T) -> Self where T: Fn() -> BlockOptions {
assert!(count > 0, "There must be at least 1 block");
let mut parent_hash = self.last().hash();
let mut parent_number = self.last().number();
let mut blocks = VecDeque::with_capacity(count);
for _ in 0..count {
let mut block = Block::default();
let metadata = get_metadata();
let block_number = parent_number + 1;
let transactions = metadata.transactions;
let transactions_root = ordered_trie_root(transactions.iter().map(rlp::encode));
/// Add a bunch of blocks with given metadata.
pub fn add_blocks_with<T>(&self, count: usize, get_metadata: T) -> Self
where
T: Fn() -> BlockOptions,
{
assert!(count > 0, "There must be at least 1 block");
let mut parent_hash = self.last().hash();
let mut parent_number = self.last().number();
let mut blocks = VecDeque::with_capacity(count);
for _ in 0..count {
let mut block = Block::default();
let metadata = get_metadata();
let block_number = parent_number + 1;
let transactions = metadata.transactions;
let transactions_root = ordered_trie_root(transactions.iter().map(rlp::encode));
block.header.set_parent_hash(parent_hash);
block.header.set_number(block_number);
block.header.set_log_bloom(metadata.bloom);
block.header.set_difficulty(metadata.difficulty);
block.header.set_transactions_root(transactions_root);
block.transactions = transactions;
block.header.set_parent_hash(parent_hash);
block.header.set_number(block_number);
block.header.set_log_bloom(metadata.bloom);
block.header.set_difficulty(metadata.difficulty);
block.header.set_transactions_root(transactions_root);
block.transactions = transactions;
parent_hash = block.hash();
parent_number = block_number;
parent_hash = block.hash();
parent_number = block_number;
blocks.push_back(block);
}
blocks.push_back(block);
}
BlockBuilder {
blocks,
}
}
BlockBuilder { blocks }
}
/// Get a reference to the last generated block.
#[inline]
pub fn last(&self) -> &Block {
self.blocks.back().expect("There is always at least 1 block")
}
/// Get a reference to the last generated block.
#[inline]
pub fn last(&self) -> &Block {
self.blocks
.back()
.expect("There is always at least 1 block")
}
}
/// Generates a blockchain from given block builders (blocks will be concatenated).
#[derive(Clone)]
pub struct BlockGenerator {
builders: VecDeque<BlockBuilder>,
builders: VecDeque<BlockBuilder>,
}
impl BlockGenerator {
/// Create new block generator.
pub fn new<T>(builders: T) -> Self where T: IntoIterator<Item = BlockBuilder> {
BlockGenerator {
builders: builders.into_iter().collect(),
}
}
/// Create new block generator.
pub fn new<T>(builders: T) -> Self
where
T: IntoIterator<Item = BlockBuilder>,
{
BlockGenerator {
builders: builders.into_iter().collect(),
}
}
}
impl Iterator for BlockGenerator {
type Item = Block;
type Item = Block;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.builders.front_mut() {
Some(ref mut builder) => {
if let Some(block) = builder.blocks.pop_front() {
return Some(block);
}
},
None => return None,
}
self.builders.pop_front();
}
}
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.builders.front_mut() {
Some(ref mut builder) => {
if let Some(block) = builder.blocks.pop_front() {
return Some(block);
}
}
None => return None,
}
self.builders.pop_front();
}
}
}
#[cfg(test)]
mod tests {
use super::{BlockBuilder, BlockOptions, BlockGenerator};
use super::{BlockBuilder, BlockGenerator, BlockOptions};
#[test]
fn test_block_builder() {
let genesis = BlockBuilder::genesis();
let block_1 = genesis.add_block();
let block_1001 = block_1.add_blocks(1000);
let block_1002 = block_1001.add_block_with(|| BlockOptions::default());
let generator = BlockGenerator::new(vec![genesis, block_1, block_1001, block_1002]);
assert_eq!(generator.count(), 1003);
}
#[test]
fn test_block_builder() {
let genesis = BlockBuilder::genesis();
let block_1 = genesis.add_block();
let block_1001 = block_1.add_blocks(1000);
let block_1002 = block_1001.add_block_with(|| BlockOptions::default());
let generator = BlockGenerator::new(vec![genesis, block_1, block_1001, block_1002]);
assert_eq!(generator.count(), 1003);
}
#[test]
fn test_block_builder_fork() {
let genesis = BlockBuilder::genesis();
let block_10a = genesis.add_blocks(10);
let block_11b = genesis.add_blocks(11);
assert_eq!(block_10a.last().number(), 10);
assert_eq!(block_11b.last().number(), 11);
}
#[test]
fn test_block_builder_fork() {
let genesis = BlockBuilder::genesis();
let block_10a = genesis.add_blocks(10);
let block_11b = genesis.add_blocks(11);
assert_eq!(block_10a.last().number(), 10);
assert_eq!(block_11b.last().number(), 11);
}
}
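To illustrate the generator API shown above (a sketch, not part of the commit): a chain is built from chained builders, which are then walked as an iterator of encoded blocks.

    // Sketch: genesis plus three blocks, the last with a custom difficulty.
    let genesis = BlockBuilder::genesis();
    let early = genesis.add_blocks(2);
    let heavy = early.add_block_with_difficulty(100u64);
    let generator = BlockGenerator::new(vec![genesis, early, heavy]);
    for block in generator {
        // Each yielded Block exposes number(), hash(), difficulty() and encoded().
        let _ = (block.number(), block.hash(), block.difficulty());
    }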


@ -16,120 +16,132 @@
//! Import route.
use ethereum_types::H256;
use crate::block_info::{BlockInfo, BlockLocation};
use ethereum_types::H256;
/// Import route for newly inserted block.
#[derive(Debug, PartialEq, Clone)]
pub struct ImportRoute {
/// Blocks that were invalidated by new block.
pub retracted: Vec<H256>,
/// Blocks that were validated by new block.
pub enacted: Vec<H256>,
/// Blocks which are neither retracted nor enacted.
pub omitted: Vec<H256>,
/// Blocks that were invalidated by new block.
pub retracted: Vec<H256>,
/// Blocks that were validated by new block.
pub enacted: Vec<H256>,
/// Blocks which are neither retracted nor enacted.
pub omitted: Vec<H256>,
}
impl ImportRoute {
/// Empty import route.
pub fn none() -> Self {
ImportRoute {
retracted: vec![],
enacted: vec![],
omitted: vec![],
}
}
/// Empty import route.
pub fn none() -> Self {
ImportRoute {
retracted: vec![],
enacted: vec![],
omitted: vec![],
}
}
}
impl From<BlockInfo> for ImportRoute {
fn from(info: BlockInfo) -> ImportRoute {
match info.location {
BlockLocation::CanonChain => ImportRoute {
retracted: vec![],
enacted: vec![info.hash],
omitted: vec![],
},
BlockLocation::Branch => ImportRoute {
retracted: vec![],
enacted: vec![],
omitted: vec![info.hash],
},
BlockLocation::BranchBecomingCanonChain(mut data) => {
data.enacted.push(info.hash);
ImportRoute {
retracted: data.retracted,
enacted: data.enacted,
omitted: vec![],
}
}
}
}
fn from(info: BlockInfo) -> ImportRoute {
match info.location {
BlockLocation::CanonChain => ImportRoute {
retracted: vec![],
enacted: vec![info.hash],
omitted: vec![],
},
BlockLocation::Branch => ImportRoute {
retracted: vec![],
enacted: vec![],
omitted: vec![info.hash],
},
BlockLocation::BranchBecomingCanonChain(mut data) => {
data.enacted.push(info.hash);
ImportRoute {
retracted: data.retracted,
enacted: data.enacted,
omitted: vec![],
}
}
}
}
}
#[cfg(test)]
mod tests {
use ethereum_types::{H256, U256};
use crate::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData};
use super::ImportRoute;
use super::ImportRoute;
use crate::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData};
use ethereum_types::{H256, U256};
#[test]
fn import_route_none() {
assert_eq!(ImportRoute::none(), ImportRoute {
enacted: vec![],
retracted: vec![],
omitted: vec![],
});
}
#[test]
fn import_route_none() {
assert_eq!(
ImportRoute::none(),
ImportRoute {
enacted: vec![],
retracted: vec![],
omitted: vec![],
}
);
}
#[test]
fn import_route_branch() {
let info = BlockInfo {
hash: H256::from(U256::from(1)),
number: 0,
total_difficulty: U256::from(0),
location: BlockLocation::Branch,
};
#[test]
fn import_route_branch() {
let info = BlockInfo {
hash: H256::from(U256::from(1)),
number: 0,
total_difficulty: U256::from(0),
location: BlockLocation::Branch,
};
assert_eq!(ImportRoute::from(info), ImportRoute {
retracted: vec![],
enacted: vec![],
omitted: vec![H256::from(U256::from(1))],
});
}
assert_eq!(
ImportRoute::from(info),
ImportRoute {
retracted: vec![],
enacted: vec![],
omitted: vec![H256::from(U256::from(1))],
}
);
}
#[test]
fn import_route_canon_chain() {
let info = BlockInfo {
hash: H256::from(U256::from(1)),
number: 0,
total_difficulty: U256::from(0),
location: BlockLocation::CanonChain,
};
#[test]
fn import_route_canon_chain() {
let info = BlockInfo {
hash: H256::from(U256::from(1)),
number: 0,
total_difficulty: U256::from(0),
location: BlockLocation::CanonChain,
};
assert_eq!(ImportRoute::from(info), ImportRoute {
retracted: vec![],
enacted: vec![H256::from(U256::from(1))],
omitted: vec![],
});
}
assert_eq!(
ImportRoute::from(info),
ImportRoute {
retracted: vec![],
enacted: vec![H256::from(U256::from(1))],
omitted: vec![],
}
);
}
#[test]
fn import_route_branch_becoming_canon_chain() {
let info = BlockInfo {
hash: H256::from(U256::from(2)),
number: 0,
total_difficulty: U256::from(0),
location: BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData {
ancestor: H256::from(U256::from(0)),
enacted: vec![H256::from(U256::from(1))],
retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))],
})
};
#[test]
fn import_route_branch_becoming_canon_chain() {
let info = BlockInfo {
hash: H256::from(U256::from(2)),
number: 0,
total_difficulty: U256::from(0),
location: BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData {
ancestor: H256::from(U256::from(0)),
enacted: vec![H256::from(U256::from(1))],
retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))],
}),
};
assert_eq!(ImportRoute::from(info), ImportRoute {
retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))],
enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))],
omitted: vec![],
});
}
assert_eq!(
ImportRoute::from(info),
ImportRoute {
retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))],
enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))],
omitted: vec![],
}
);
}
}


@ -28,10 +28,12 @@ mod update;
pub mod generator;
pub use self::blockchain::{BlockProvider, BlockChain, BlockChainDB, BlockChainDBHandler};
pub use self::cache::CacheSize;
pub use self::config::Config;
pub use self::import_route::ImportRoute;
pub use self::update::ExtrasInsert;
pub use ethcore_db::keys::{BlockReceipts, BlockDetails, TransactionAddress, BlockNumberKey};
pub use self::{
blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler, BlockProvider},
cache::CacheSize,
config::Config,
import_route::ImportRoute,
update::ExtrasInsert,
};
pub use common_types::tree_route::TreeRoute;
pub use ethcore_db::keys::{BlockDetails, BlockNumberKey, BlockReceipts, TransactionAddress};


@ -16,36 +16,34 @@
use std::collections::HashMap;
use common_types::BlockNumber;
use common_types::encoded::Block;
use common_types::engines::ForkChoice;
use common_types::{encoded::Block, engines::ForkChoice, BlockNumber};
use ethcore_db::keys::{BlockDetails, BlockReceipts, TransactionAddress};
use ethereum_types::{H256, Bloom};
use ethereum_types::{Bloom, H256};
use crate::block_info::BlockInfo;
/// Block extras update info.
pub struct ExtrasUpdate {
/// Block info.
pub info: BlockInfo,
/// Current block uncompressed rlp bytes
pub block: Block,
/// Modified block hashes.
pub block_hashes: HashMap<BlockNumber, H256>,
/// Modified block details.
pub block_details: HashMap<H256, BlockDetails>,
/// Modified block receipts.
pub block_receipts: HashMap<H256, BlockReceipts>,
/// Modified blocks blooms.
pub blocks_blooms: Option<(u64, Vec<Bloom>)>,
/// Modified transaction addresses (None signifies removed transactions).
pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
/// Block info.
pub info: BlockInfo,
/// Current block uncompressed rlp bytes
pub block: Block,
/// Modified block hashes.
pub block_hashes: HashMap<BlockNumber, H256>,
/// Modified block details.
pub block_details: HashMap<H256, BlockDetails>,
/// Modified block receipts.
pub block_receipts: HashMap<H256, BlockReceipts>,
/// Modified blocks blooms.
pub blocks_blooms: Option<(u64, Vec<Bloom>)>,
/// Modified transaction addresses (None signifies removed transactions).
pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
}
/// Extra information in block insertion.
pub struct ExtrasInsert {
/// The primitive fork choice before applying finalization rules.
pub fork_choice: ForkChoice,
/// Is the inserted block considered finalized.
pub is_finalized: bool,
/// The primitive fork choice before applying finalization rules.
pub fork_choice: ForkChoice,
/// Is the inserted block considered finalized.
pub is_finalized: bool,
}

File diff suppressed because it is too large


@ -22,12 +22,12 @@ use types::ids::BlockId;
/// Provides `call_contract` method
pub trait CallContract {
/// Like `call`, but with various defaults. Designed to be used for calling contracts.
fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String>;
/// Like `call`, but with various defaults. Designed to be used for calling contracts.
fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String>;
}
/// Provides information on a blockchain service and its registry
pub trait RegistryInfo {
/// Get the address of a particular blockchain service, if available.
fn registry_address(&self, name: String, block: BlockId) -> Option<Address>;
/// Get the address of a particular blockchain service, if available.
fn registry_address(&self, name: String, block: BlockId) -> Option<Address>;
}
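A trait this small is easy to stub in tests. A hedged sketch follows (assuming Bytes is the crate's usual Vec<u8> alias and Address comes from ethereum_types; neither import is shown in this hunk):

    struct EchoContract;

    impl CallContract for EchoContract {
        // Ignore the block and address and echo the call data back,
        // which is often enough for unit tests of callers.
        fn call_contract(&self, _id: BlockId, _address: Address, data: Bytes) -> Result<Bytes, String> {
            Ok(data)
        }
    }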


@ -16,67 +16,91 @@
//! Database cache manager
use std::collections::{VecDeque, HashSet};
use std::hash::Hash;
use std::{
collections::{HashSet, VecDeque},
hash::Hash,
};
const COLLECTION_QUEUE_SIZE: usize = 8;
/// DB cache manager
pub struct CacheManager<T> {
pref_cache_size: usize,
max_cache_size: usize,
bytes_per_cache_entry: usize,
cache_usage: VecDeque<HashSet<T>>
pref_cache_size: usize,
max_cache_size: usize,
bytes_per_cache_entry: usize,
cache_usage: VecDeque<HashSet<T>>,
}
impl<T> CacheManager<T> where T: Eq + Hash {
/// Create new cache manager with preferred (heap) sizes.
pub fn new(pref_cache_size: usize, max_cache_size: usize, bytes_per_cache_entry: usize) -> Self {
CacheManager {
pref_cache_size: pref_cache_size,
max_cache_size: max_cache_size,
bytes_per_cache_entry: bytes_per_cache_entry,
cache_usage: (0..COLLECTION_QUEUE_SIZE).into_iter().map(|_| Default::default()).collect(),
}
}
impl<T> CacheManager<T>
where
T: Eq + Hash,
{
/// Create new cache manager with preferred (heap) sizes.
pub fn new(
pref_cache_size: usize,
max_cache_size: usize,
bytes_per_cache_entry: usize,
) -> Self {
CacheManager {
pref_cache_size: pref_cache_size,
max_cache_size: max_cache_size,
bytes_per_cache_entry: bytes_per_cache_entry,
cache_usage: (0..COLLECTION_QUEUE_SIZE)
.into_iter()
.map(|_| Default::default())
.collect(),
}
}
/// Mark element as used.
pub fn note_used(&mut self, id: T) {
if !self.cache_usage[0].contains(&id) {
if let Some(c) = self.cache_usage.iter_mut().skip(1).find(|e| e.contains(&id)) {
c.remove(&id);
}
self.cache_usage[0].insert(id);
}
}
/// Mark element as used.
pub fn note_used(&mut self, id: T) {
if !self.cache_usage[0].contains(&id) {
if let Some(c) = self
.cache_usage
.iter_mut()
.skip(1)
.find(|e| e.contains(&id))
{
c.remove(&id);
}
self.cache_usage[0].insert(id);
}
}
/// Collects unused objects from cache.
/// First param is the current size of the cache.
/// Second one is a callback invoked with the objects to remove. It should also return the new size of the cache.
pub fn collect_garbage<F>(&mut self, current_size: usize, mut notify_unused: F) where F: FnMut(HashSet<T>) -> usize {
if current_size < self.pref_cache_size {
self.rotate_cache_if_needed();
return;
}
/// Collects unused objects from cache.
/// First param is the current size of the cache.
/// Second one is a callback invoked with the objects to remove. It should also return the new size of the cache.
pub fn collect_garbage<F>(&mut self, current_size: usize, mut notify_unused: F)
where
F: FnMut(HashSet<T>) -> usize,
{
if current_size < self.pref_cache_size {
self.rotate_cache_if_needed();
return;
}
for _ in 0..COLLECTION_QUEUE_SIZE {
if let Some(back) = self.cache_usage.pop_back() {
let current_size = notify_unused(back);
self.cache_usage.push_front(Default::default());
if current_size < self.max_cache_size {
break
}
}
}
}
for _ in 0..COLLECTION_QUEUE_SIZE {
if let Some(back) = self.cache_usage.pop_back() {
let current_size = notify_unused(back);
self.cache_usage.push_front(Default::default());
if current_size < self.max_cache_size {
break;
}
}
}
}
fn rotate_cache_if_needed(&mut self) {
if self.cache_usage.is_empty() { return }
fn rotate_cache_if_needed(&mut self) {
if self.cache_usage.is_empty() {
return;
}
if self.cache_usage[0].len() * self.bytes_per_cache_entry > self.pref_cache_size / COLLECTION_QUEUE_SIZE {
if let Some(cache) = self.cache_usage.pop_back() {
self.cache_usage.push_front(cache);
}
}
}
if self.cache_usage[0].len() * self.bytes_per_cache_entry
> self.pref_cache_size / COLLECTION_QUEUE_SIZE
{
if let Some(cache) = self.cache_usage.pop_back() {
self.cache_usage.push_front(cache);
}
}
}
}
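A minimal usage sketch for the CacheManager above (illustrative, not from the original sources); the sizes, the 32-bytes-per-entry figure and the HashMap-backed cache are assumptions.

use std::collections::{HashMap, HashSet};

fn cache_manager_sketch() {
    // Preferred heap size 1 KiB, hard cap 4 KiB, ~32 bytes per entry (all illustrative).
    let mut manager: CacheManager<u64> = CacheManager::new(1024, 4096, 32);
    let mut cache: HashMap<u64, Vec<u8>> = HashMap::new();

    // Record accesses; recently used ids stay in the front bucket.
    for id in 0..10u64 {
        cache.insert(id, vec![0u8; 32]);
        manager.note_used(id);
    }

    // Once the size estimate exceeds the preferred size, the oldest buckets are
    // handed to the callback for eviction; it reports the new size back.
    let current_size = cache.len() * 32;
    manager.collect_garbage(current_size, |unused: HashSet<u64>| {
        for id in &unused {
            cache.remove(id);
        }
        cache.len() * 32
    });
}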

View File

@ -16,11 +16,9 @@
//! Database utilities and definitions.
use std::ops::Deref;
use std::hash::Hash;
use std::collections::HashMap;
use parking_lot::RwLock;
use kvdb::{DBTransaction, KeyValueDB};
use parking_lot::RwLock;
use std::{collections::HashMap, hash::Hash, ops::Deref};
use rlp;
@@ -47,208 +45,270 @@ pub const NUM_COLUMNS: Option<u32> = Some(8);
/// Modes for updating caches.
#[derive(Clone, Copy)]
pub enum CacheUpdatePolicy {
/// Overwrite entries.
Overwrite,
/// Remove entries.
Remove,
/// Overwrite entries.
Overwrite,
/// Remove entries.
Remove,
}
/// A cache for arbitrary key-value pairs.
pub trait Cache<K, V> {
/// Insert an entry into the cache and get the old value.
fn insert(&mut self, k: K, v: V) -> Option<V>;
/// Insert an entry into the cache and get the old value.
fn insert(&mut self, k: K, v: V) -> Option<V>;
/// Remove an entry from the cache, getting the old value if it existed.
fn remove(&mut self, k: &K) -> Option<V>;
/// Remove an entry from the cache, getting the old value if it existed.
fn remove(&mut self, k: &K) -> Option<V>;
/// Query the cache for a key's associated value.
fn get(&self, k: &K) -> Option<&V>;
/// Query the cache for a key's associated value.
fn get(&self, k: &K) -> Option<&V>;
}
impl<K, V> Cache<K, V> for HashMap<K, V> where K: Hash + Eq {
fn insert(&mut self, k: K, v: V) -> Option<V> {
HashMap::insert(self, k, v)
}
impl<K, V> Cache<K, V> for HashMap<K, V>
where
K: Hash + Eq,
{
fn insert(&mut self, k: K, v: V) -> Option<V> {
HashMap::insert(self, k, v)
}
fn remove(&mut self, k: &K) -> Option<V> {
HashMap::remove(self, k)
}
fn remove(&mut self, k: &K) -> Option<V> {
HashMap::remove(self, k)
}
fn get(&self, k: &K) -> Option<&V> {
HashMap::get(self, k)
}
fn get(&self, k: &K) -> Option<&V> {
HashMap::get(self, k)
}
}
/// Should be used to get database key associated with given value.
pub trait Key<T> {
/// The db key associated with this value.
type Target: Deref<Target = [u8]>;
/// The db key associated with this value.
type Target: Deref<Target = [u8]>;
/// Returns db key.
fn key(&self) -> Self::Target;
/// Returns db key.
fn key(&self) -> Self::Target;
}
/// Should be used to write value into database.
pub trait Writable {
/// Writes the value into the database.
fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: Deref<Target = [u8]>;
/// Writes the value into the database.
fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T)
where
T: rlp::Encodable,
R: Deref<Target = [u8]>;
/// Deletes key from the database.
fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>) where T: rlp::Encodable, R: Deref<Target = [u8]>;
/// Deletes key from the database.
fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>)
where
T: rlp::Encodable,
R: Deref<Target = [u8]>;
/// Writes the value into the database and updates the cache.
fn write_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: Deref<Target = [u8]> {
self.write(col, &key, &value);
match policy {
CacheUpdatePolicy::Overwrite => {
cache.insert(key, value);
},
CacheUpdatePolicy::Remove => {
cache.remove(&key);
}
}
}
/// Writes the value into the database and updates the cache.
fn write_with_cache<K, T, R>(
&mut self,
col: Option<u32>,
cache: &mut Cache<K, T>,
key: K,
value: T,
policy: CacheUpdatePolicy,
) where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: Deref<Target = [u8]>,
{
self.write(col, &key, &value);
match policy {
CacheUpdatePolicy::Overwrite => {
cache.insert(key, value);
}
CacheUpdatePolicy::Remove => {
cache.remove(&key);
}
}
}
/// Writes the values into the database and updates the cache.
fn extend_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: Deref<Target = [u8]> {
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values {
self.write(col, &key, &value);
cache.insert(key, value);
}
},
CacheUpdatePolicy::Remove => {
for (key, value) in &values {
self.write(col, key, value);
cache.remove(key);
}
},
}
}
/// Writes and removes the values into the database and updates the cache.
fn extend_with_option_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: Deref<Target = [u8]> {
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values {
match value {
Some(ref v) => self.write(col, &key, v),
None => self.delete(col, &key),
}
cache.insert(key, value);
}
},
CacheUpdatePolicy::Remove => {
for (key, value) in values {
match value {
Some(v) => self.write(col, &key, &v),
None => self.delete(col, &key),
}
cache.remove(&key);
}
},
}
}
/// Writes the values into the database and updates the cache.
fn extend_with_cache<K, T, R>(
&mut self,
col: Option<u32>,
cache: &mut Cache<K, T>,
values: HashMap<K, T>,
policy: CacheUpdatePolicy,
) where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: Deref<Target = [u8]>,
{
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values {
self.write(col, &key, &value);
cache.insert(key, value);
}
}
CacheUpdatePolicy::Remove => {
for (key, value) in &values {
self.write(col, key, value);
cache.remove(key);
}
}
}
}
/// Writes and removes the values into the database and updates the cache.
fn extend_with_option_cache<K, T, R>(
&mut self,
col: Option<u32>,
cache: &mut Cache<K, Option<T>>,
values: HashMap<K, Option<T>>,
policy: CacheUpdatePolicy,
) where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: Deref<Target = [u8]>,
{
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values {
match value {
Some(ref v) => self.write(col, &key, v),
None => self.delete(col, &key),
}
cache.insert(key, value);
}
}
CacheUpdatePolicy::Remove => {
for (key, value) in values {
match value {
Some(v) => self.write(col, &key, &v),
None => self.delete(col, &key),
}
cache.remove(&key);
}
}
}
}
}
/// Should be used to read values from database.
pub trait Readable {
/// Returns value for given key.
fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T> where
T: rlp::Decodable,
R: Deref<Target = [u8]>;
/// Returns value for given key.
fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T>
where
T: rlp::Decodable,
R: Deref<Target = [u8]>;
/// Returns value for given key either in cache or in database.
fn read_with_cache<K, T, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> Option<T> where
K: Key<T> + Eq + Hash + Clone,
T: Clone + rlp::Decodable,
C: Cache<K, T> {
{
let read = cache.read();
if let Some(v) = read.get(key) {
return Some(v.clone());
}
}
/// Returns value for given key either in cache or in database.
fn read_with_cache<K, T, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> Option<T>
where
K: Key<T> + Eq + Hash + Clone,
T: Clone + rlp::Decodable,
C: Cache<K, T>,
{
{
let read = cache.read();
if let Some(v) = read.get(key) {
return Some(v.clone());
}
}
self.read(col, key).map(|value: T|{
let mut write = cache.write();
write.insert(key.clone(), value.clone());
value
})
}
self.read(col, key).map(|value: T| {
let mut write = cache.write();
write.insert(key.clone(), value.clone());
value
})
}
/// Returns value for given key either in two-layered cache or in database.
fn read_with_two_layer_cache<K, T, C>(&self, col: Option<u32>, l1_cache: &RwLock<C>, l2_cache: &RwLock<C>, key: &K) -> Option<T> where
K: Key<T> + Eq + Hash + Clone,
T: Clone + rlp::Decodable,
C: Cache<K, T> {
{
let read = l1_cache.read();
if let Some(v) = read.get(key) {
return Some(v.clone());
}
}
/// Returns value for given key either in two-layered cache or in database.
fn read_with_two_layer_cache<K, T, C>(
&self,
col: Option<u32>,
l1_cache: &RwLock<C>,
l2_cache: &RwLock<C>,
key: &K,
) -> Option<T>
where
K: Key<T> + Eq + Hash + Clone,
T: Clone + rlp::Decodable,
C: Cache<K, T>,
{
{
let read = l1_cache.read();
if let Some(v) = read.get(key) {
return Some(v.clone());
}
}
self.read_with_cache(col, l2_cache, key)
}
self.read_with_cache(col, l2_cache, key)
}
/// Returns true if given value exists.
fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: Deref<Target= [u8]>;
/// Returns true if given value exists.
fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool
where
R: Deref<Target = [u8]>;
/// Returns true if given value exists either in cache or in database.
fn exists_with_cache<K, T, R, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> bool where
K: Eq + Hash + Key<T, Target = R>,
R: Deref<Target = [u8]>,
C: Cache<K, T> {
{
let read = cache.read();
if read.get(key).is_some() {
return true;
}
}
/// Returns true if given value exists either in cache or in database.
fn exists_with_cache<K, T, R, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> bool
where
K: Eq + Hash + Key<T, Target = R>,
R: Deref<Target = [u8]>,
C: Cache<K, T>,
{
{
let read = cache.read();
if read.get(key).is_some() {
return true;
}
}
self.exists::<T, R>(col, key)
}
self.exists::<T, R>(col, key)
}
}
impl Writable for DBTransaction {
fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: Deref<Target = [u8]> {
self.put(col, &key.key(), &rlp::encode(value));
}
fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T)
where
T: rlp::Encodable,
R: Deref<Target = [u8]>,
{
self.put(col, &key.key(), &rlp::encode(value));
}
fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>) where T: rlp::Encodable, R: Deref<Target = [u8]> {
self.delete(col, &key.key());
}
fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>)
where
T: rlp::Encodable,
R: Deref<Target = [u8]>,
{
self.delete(col, &key.key());
}
}
impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T>
where T: rlp::Decodable, R: Deref<Target = [u8]> {
self.get(col, &key.key())
.expect(&format!("db get failed, key: {:?}", &key.key() as &[u8]))
.map(|v| rlp::decode(&v).expect("decode db value failed") )
fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T>
where
T: rlp::Decodable,
R: Deref<Target = [u8]>,
{
self.get(col, &key.key())
.expect(&format!("db get failed, key: {:?}", &key.key() as &[u8]))
.map(|v| rlp::decode(&v).expect("decode db value failed"))
}
}
fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool
where
R: Deref<Target = [u8]>,
{
let result = self.get(col, &key.key());
fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: Deref<Target = [u8]> {
let result = self.get(col, &key.key());
match result {
Ok(v) => v.is_some(),
Err(err) => {
panic!("db get failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
}
}
}
match result {
Ok(v) => v.is_some(),
Err(err) => {
panic!(
"db get failed, key: {:?}, err: {:?}",
&key.key() as &[u8],
err
);
}
}
}
}
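A sketch (illustrative, not from the original sources) of how the Key, Cache and Writable pieces above combine: a hypothetical u64-keyed, u64-valued entry written through a kvdb DBTransaction with a HashMap as the cache. The key scheme and the column number are made up for the example.

use std::collections::HashMap;
use kvdb::DBTransaction;

// Hypothetical key scheme: big-endian bytes of the id.
impl Key<u64> for u64 {
    type Target = Vec<u8>;

    fn key(&self) -> Vec<u8> {
        self.to_be_bytes().to_vec()
    }
}

fn write_with_cache_sketch() {
    let mut cache: HashMap<u64, u64> = HashMap::new();
    let mut tx = DBTransaction::new();

    // Stage the write in the transaction and mirror it into the cache.
    tx.write_with_cache(Some(0), &mut cache, 7u64, 42u64, CacheUpdatePolicy::Overwrite);
    assert_eq!(cache.get(&7), Some(&42));
}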

View File

@@ -16,99 +16,96 @@
//! Blockchain DB extras.
use std::io::Write;
use std::ops;
use std::{io::Write, ops};
use common_types::BlockNumber;
use common_types::engines::epoch::Transition as EpochTransition;
use common_types::receipt::Receipt;
use common_types::{engines::epoch::Transition as EpochTransition, receipt::Receipt, BlockNumber};
use ethereum_types::{H256, H264, U256};
use heapsize::HeapSizeOf;
use kvdb::PREFIX_LEN as DB_PREFIX_LEN;
use rlp;
use rlp_derive::{RlpEncodableWrapper, RlpDecodableWrapper, RlpEncodable, RlpDecodable};
use rlp_derive::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper};
use crate::db::Key;
/// Represents index of extra data in database
#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
pub enum ExtrasIndex {
/// Block details index
BlockDetails = 0,
/// Block hash index
BlockHash = 1,
/// Transaction address index
TransactionAddress = 2,
/// Block receipts index
BlockReceipts = 4,
/// Epoch transition data index.
EpochTransitions = 5,
/// Pending epoch transition data index.
PendingEpochTransition = 6,
/// Block details index
BlockDetails = 0,
/// Block hash index
BlockHash = 1,
/// Transaction address index
TransactionAddress = 2,
/// Block receipts index
BlockReceipts = 4,
/// Epoch transition data index.
EpochTransitions = 5,
/// Pending epoch transition data index.
PendingEpochTransition = 6,
}
fn with_index(hash: &H256, i: ExtrasIndex) -> H264 {
let mut result = H264::default();
result[0] = i as u8;
(*result)[1..].clone_from_slice(hash);
result
let mut result = H264::default();
result[0] = i as u8;
(*result)[1..].clone_from_slice(hash);
result
}
/// Wrapper for block number used as a DB key.
pub struct BlockNumberKey([u8; 5]);
impl ops::Deref for BlockNumberKey {
type Target = [u8];
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Key<H256> for BlockNumber {
type Target = BlockNumberKey;
type Target = BlockNumberKey;
fn key(&self) -> Self::Target {
let mut result = [0u8; 5];
result[0] = ExtrasIndex::BlockHash as u8;
result[1] = (self >> 24) as u8;
result[2] = (self >> 16) as u8;
result[3] = (self >> 8) as u8;
result[4] = *self as u8;
BlockNumberKey(result)
}
fn key(&self) -> Self::Target {
let mut result = [0u8; 5];
result[0] = ExtrasIndex::BlockHash as u8;
result[1] = (self >> 24) as u8;
result[2] = (self >> 16) as u8;
result[3] = (self >> 8) as u8;
result[4] = *self as u8;
BlockNumberKey(result)
}
}
impl Key<BlockDetails> for H256 {
type Target = H264;
type Target = H264;
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::BlockDetails)
}
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::BlockDetails)
}
}
impl Key<TransactionAddress> for H256 {
type Target = H264;
type Target = H264;
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::TransactionAddress)
}
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::TransactionAddress)
}
}
impl Key<BlockReceipts> for H256 {
type Target = H264;
type Target = H264;
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::BlockReceipts)
}
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::BlockReceipts)
}
}
impl Key<common_types::engines::epoch::PendingTransition> for H256 {
type Target = H264;
type Target = H264;
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::PendingEpochTransition)
}
fn key(&self) -> H264 {
with_index(self, ExtrasIndex::PendingEpochTransition)
}
}
/// length of epoch keys.
@@ -117,153 +114,170 @@ pub const EPOCH_KEY_LEN: usize = DB_PREFIX_LEN + 16;
/// epoch key prefix.
/// used to iterate over all epoch transitions in order from genesis.
pub const EPOCH_KEY_PREFIX: &'static [u8; DB_PREFIX_LEN] = &[
ExtrasIndex::EpochTransitions as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
ExtrasIndex::EpochTransitions as u8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
];
/// Epoch transitions key
pub struct EpochTransitionsKey([u8; EPOCH_KEY_LEN]);
impl ops::Deref for EpochTransitionsKey {
type Target = [u8];
type Target = [u8];
fn deref(&self) -> &[u8] { &self.0[..] }
fn deref(&self) -> &[u8] {
&self.0[..]
}
}
impl Key<EpochTransitions> for u64 {
type Target = EpochTransitionsKey;
type Target = EpochTransitionsKey;
fn key(&self) -> Self::Target {
let mut arr = [0u8; EPOCH_KEY_LEN];
arr[..DB_PREFIX_LEN].copy_from_slice(&EPOCH_KEY_PREFIX[..]);
fn key(&self) -> Self::Target {
let mut arr = [0u8; EPOCH_KEY_LEN];
arr[..DB_PREFIX_LEN].copy_from_slice(&EPOCH_KEY_PREFIX[..]);
write!(&mut arr[DB_PREFIX_LEN..], "{:016x}", self)
.expect("format arg is valid; no more than 16 chars will be written; qed");
write!(&mut arr[DB_PREFIX_LEN..], "{:016x}", self)
.expect("format arg is valid; no more than 16 chars will be written; qed");
EpochTransitionsKey(arr)
}
EpochTransitionsKey(arr)
}
}
/// Familial details concerning a block
#[derive(Debug, Clone)]
pub struct BlockDetails {
/// Block number
pub number: BlockNumber,
/// Total difficulty of the block and all its parents
pub total_difficulty: U256,
/// Parent block hash
pub parent: H256,
/// List of children block hashes
pub children: Vec<H256>,
/// Whether the block is considered finalized
pub is_finalized: bool,
/// Block number
pub number: BlockNumber,
/// Total difficulty of the block and all its parents
pub total_difficulty: U256,
/// Parent block hash
pub parent: H256,
/// List of children block hashes
pub children: Vec<H256>,
/// Whether the block is considered finalized
pub is_finalized: bool,
}
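A short sketch (illustrative, not from the original sources) of BlockDetails round-tripping through the rlp impls that follow; a non-finalized block uses the short 4-item list, a finalized one the 5-item list.

use ethereum_types::{H256, U256};

fn block_details_rlp_sketch() {
    let details = BlockDetails {
        number: 1,
        total_difficulty: U256::from(131_072u64),
        parent: H256::zero(),
        children: vec![],
        is_finalized: false,
    };

    // Encode with the short (4-item) form and decode it back.
    let encoded = rlp::encode(&details);
    let decoded: BlockDetails = rlp::decode(&encoded).expect("round-trip");
    assert_eq!(decoded.number, 1);
    assert!(!decoded.is_finalized);
}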
impl rlp::Encodable for BlockDetails {
fn rlp_append(&self, stream: &mut rlp::RlpStream) {
let use_short_version = !self.is_finalized;
fn rlp_append(&self, stream: &mut rlp::RlpStream) {
let use_short_version = !self.is_finalized;
match use_short_version {
true => { stream.begin_list(4); },
false => { stream.begin_list(5); },
}
match use_short_version {
true => {
stream.begin_list(4);
}
false => {
stream.begin_list(5);
}
}
stream.append(&self.number);
stream.append(&self.total_difficulty);
stream.append(&self.parent);
stream.append_list(&self.children);
if !use_short_version {
stream.append(&self.is_finalized);
}
}
stream.append(&self.number);
stream.append(&self.total_difficulty);
stream.append(&self.parent);
stream.append_list(&self.children);
if !use_short_version {
stream.append(&self.is_finalized);
}
}
}
impl rlp::Decodable for BlockDetails {
fn decode(rlp: &rlp::Rlp) -> Result<Self, rlp::DecoderError> {
let use_short_version = match rlp.item_count()? {
4 => true,
5 => false,
_ => return Err(rlp::DecoderError::RlpIncorrectListLen),
};
fn decode(rlp: &rlp::Rlp) -> Result<Self, rlp::DecoderError> {
let use_short_version = match rlp.item_count()? {
4 => true,
5 => false,
_ => return Err(rlp::DecoderError::RlpIncorrectListLen),
};
Ok(BlockDetails {
number: rlp.val_at(0)?,
total_difficulty: rlp.val_at(1)?,
parent: rlp.val_at(2)?,
children: rlp.list_at(3)?,
is_finalized: if use_short_version {
false
} else {
rlp.val_at(4)?
},
})
}
Ok(BlockDetails {
number: rlp.val_at(0)?,
total_difficulty: rlp.val_at(1)?,
parent: rlp.val_at(2)?,
children: rlp.list_at(3)?,
is_finalized: if use_short_version {
false
} else {
rlp.val_at(4)?
},
})
}
}
impl HeapSizeOf for BlockDetails {
fn heap_size_of_children(&self) -> usize {
self.children.heap_size_of_children()
}
fn heap_size_of_children(&self) -> usize {
self.children.heap_size_of_children()
}
}
/// Represents address of certain transaction within block
#[derive(Debug, PartialEq, Clone, RlpEncodable, RlpDecodable)]
pub struct TransactionAddress {
/// Block hash
pub block_hash: H256,
/// Transaction index within the block
pub index: usize
/// Block hash
pub block_hash: H256,
/// Transaction index within the block
pub index: usize,
}
impl HeapSizeOf for TransactionAddress {
fn heap_size_of_children(&self) -> usize { 0 }
fn heap_size_of_children(&self) -> usize {
0
}
}
/// Contains all block receipts.
#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
pub struct BlockReceipts {
/// Block receipts
pub receipts: Vec<Receipt>,
/// Block receipts
pub receipts: Vec<Receipt>,
}
impl BlockReceipts {
/// Create new block receipts wrapper.
pub fn new(receipts: Vec<Receipt>) -> Self {
BlockReceipts {
receipts: receipts
}
}
/// Create new block receipts wrapper.
pub fn new(receipts: Vec<Receipt>) -> Self {
BlockReceipts { receipts: receipts }
}
}
impl HeapSizeOf for BlockReceipts {
fn heap_size_of_children(&self) -> usize {
self.receipts.heap_size_of_children()
}
fn heap_size_of_children(&self) -> usize {
self.receipts.heap_size_of_children()
}
}
/// Candidate transitions to an epoch with specific number.
#[derive(Clone, RlpEncodable, RlpDecodable)]
pub struct EpochTransitions {
/// Epoch number
pub number: u64,
/// List of candidate transitions
pub candidates: Vec<EpochTransition>,
/// Epoch number
pub number: u64,
/// List of candidate transitions
pub candidates: Vec<EpochTransition>,
}
#[cfg(test)]
mod tests {
use rlp::*;
use rlp::*;
use super::BlockReceipts;
use super::BlockReceipts;
#[test]
fn encode_block_receipts() {
let br = BlockReceipts::new(Vec::new());
#[test]
fn encode_block_receipts() {
let br = BlockReceipts::new(Vec::new());
let mut s = RlpStream::new_list(2);
s.append(&br);
assert!(!s.is_finished(), "List shouldn't be finished yet");
s.append(&br);
assert!(s.is_finished(), "List should be finished now");
s.out();
}
let mut s = RlpStream::new_list(2);
s.append(&br);
assert!(!s.is_finished(), "List shouldn't be finished yet");
s.append(&br);
assert!(s.is_finished(), "List should be finished now");
s.out();
}
}
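A small sketch (illustrative, not from the original sources) of how the extras keys defined in this module are laid out, assuming the Key trait and the types above are in scope.

use ethereum_types::H256;

fn extras_key_layout_sketch() {
    // A block-receipts key is the 32-byte block hash prefixed with the
    // ExtrasIndex::BlockReceipts byte, giving a 33-byte (H264) key.
    let hash = H256::zero();
    let receipts_key = Key::<BlockReceipts>::key(&hash);
    assert_eq!(receipts_key[0], ExtrasIndex::BlockReceipts as u8);

    // A block-number-to-hash key is 5 bytes: the ExtrasIndex::BlockHash byte
    // followed by the number in big-endian order.
    let number: BlockNumber = 0x0102_0304;
    let number_key = Key::<H256>::key(&number);
    assert_eq!(&*number_key, &[1u8, 1, 2, 3, 4][..]);
}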

View File

@@ -20,7 +20,7 @@
mod db;
pub mod keys;
pub mod cache_manager;
pub mod keys;
pub use self::db::*;

View File

@@ -20,140 +20,140 @@
extern crate criterion;
extern crate bit_set;
extern crate ethereum_types;
extern crate parking_lot;
extern crate heapsize;
extern crate vm;
extern crate evm;
extern crate heapsize;
extern crate keccak_hash as hash;
extern crate memory_cache;
extern crate parity_bytes as bytes;
extern crate parking_lot;
extern crate rustc_hex;
extern crate vm;
use criterion::{Criterion, Bencher, black_box};
use std::str::FromStr;
use std::sync::Arc;
use ethereum_types::{U256, Address};
use vm::{ActionParams, Result, GasLeft, Ext};
use vm::tests::FakeExt;
use criterion::{black_box, Bencher, Criterion};
use ethereum_types::{Address, U256};
use evm::Factory;
use rustc_hex::FromHex;
use std::{str::FromStr, sync::Arc};
use vm::{tests::FakeExt, ActionParams, Ext, GasLeft, Result};
criterion_group!(
basic,
simple_loop_log0_usize,
simple_loop_log0_u256,
mem_gas_calculation_same_usize,
mem_gas_calculation_same_u256,
mem_gas_calculation_increasing_usize,
mem_gas_calculation_increasing_u256,
blockhash_mulmod_small,
blockhash_mulmod_large,
basic,
simple_loop_log0_usize,
simple_loop_log0_u256,
mem_gas_calculation_same_usize,
mem_gas_calculation_same_u256,
mem_gas_calculation_increasing_usize,
mem_gas_calculation_increasing_u256,
blockhash_mulmod_small,
blockhash_mulmod_large,
);
criterion_main!(basic);
fn simple_loop_log0_usize(b: &mut Criterion) {
b.bench_function("simple_loop_log0_usize", |b| {
simple_loop_log0(U256::from(::std::usize::MAX), b);
});
b.bench_function("simple_loop_log0_usize", |b| {
simple_loop_log0(U256::from(::std::usize::MAX), b);
});
}
fn simple_loop_log0_u256(b: &mut Criterion) {
b.bench_function("simple_loop_log0_u256", |b| {
simple_loop_log0(!U256::zero(), b);
});
b.bench_function("simple_loop_log0_u256", |b| {
simple_loop_log0(!U256::zero(), b);
});
}
fn simple_loop_log0(gas: U256, b: &mut Bencher) {
let factory = Factory::default();
let mut ext = FakeExt::new();
let factory = Factory::default();
let mut ext = FakeExt::new();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let code = black_box(
"62ffffff5b600190036000600fa0600357".from_hex().unwrap()
);
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let code = black_box("62ffffff5b600190036000600fa0600357".from_hex().unwrap());
b.iter(|| {
let mut params = ActionParams::default();
params.address = address.clone();
params.gas = gas;
params.code = Some(Arc::new(code.clone()));
b.iter(|| {
let mut params = ActionParams::default();
params.address = address.clone();
params.gas = gas;
params.code = Some(Arc::new(code.clone()));
let vm = factory.create(params, ext.schedule(), 0);
let vm = factory.create(params, ext.schedule(), 0);
result(vm.exec(&mut ext).ok().unwrap())
});
result(vm.exec(&mut ext).ok().unwrap())
});
}
fn mem_gas_calculation_same_usize(b: &mut Criterion) {
b.bench_function("mem_gas_calculation_same_usize", |b| {
mem_gas_calculation_same(U256::from(::std::usize::MAX), b);
});
b.bench_function("mem_gas_calculation_same_usize", |b| {
mem_gas_calculation_same(U256::from(::std::usize::MAX), b);
});
}
fn mem_gas_calculation_same_u256(b: &mut Criterion) {
b.bench_function("mem_gas_calculation_same_u256", |b| {
mem_gas_calculation_same(!U256::zero(), b);
});
b.bench_function("mem_gas_calculation_same_u256", |b| {
mem_gas_calculation_same(!U256::zero(), b);
});
}
fn mem_gas_calculation_same(gas: U256, b: &mut Bencher) {
let factory = Factory::default();
let mut ext = FakeExt::new();
let factory = Factory::default();
let mut ext = FakeExt::new();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
b.iter(|| {
let code = black_box(
"6110006001556001546000555b610fff805560016000540380600055600c57".from_hex().unwrap()
);
b.iter(|| {
let code = black_box(
"6110006001556001546000555b610fff805560016000540380600055600c57"
.from_hex()
.unwrap(),
);
let mut params = ActionParams::default();
params.address = address.clone();
params.gas = gas;
params.code = Some(Arc::new(code.clone()));
let mut params = ActionParams::default();
params.address = address.clone();
params.gas = gas;
params.code = Some(Arc::new(code.clone()));
let vm = factory.create(params, ext.schedule(), 0);
let vm = factory.create(params, ext.schedule(), 0);
result(vm.exec(&mut ext).ok().unwrap())
});
result(vm.exec(&mut ext).ok().unwrap())
});
}
fn mem_gas_calculation_increasing_usize(b: &mut Criterion) {
b.bench_function("mem_gas_calculation_increasing_usize", |b| {
mem_gas_calculation_increasing(U256::from(::std::usize::MAX), b);
});
b.bench_function("mem_gas_calculation_increasing_usize", |b| {
mem_gas_calculation_increasing(U256::from(::std::usize::MAX), b);
});
}
fn mem_gas_calculation_increasing_u256(b: &mut Criterion) {
b.bench_function("mem_gas_calculation_increasing_u256", |b| {
mem_gas_calculation_increasing(!U256::zero(), b);
});
b.bench_function("mem_gas_calculation_increasing_u256", |b| {
mem_gas_calculation_increasing(!U256::zero(), b);
});
}
fn mem_gas_calculation_increasing(gas: U256, b: &mut Bencher) {
let factory = Factory::default();
let mut ext = FakeExt::new();
let factory = Factory::default();
let mut ext = FakeExt::new();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
b.iter(|| {
let code = black_box(
"6110006001556001546000555b610fff60005401805560016000540380600055600c57".from_hex().unwrap()
);
b.iter(|| {
let code = black_box(
"6110006001556001546000555b610fff60005401805560016000540380600055600c57"
.from_hex()
.unwrap(),
);
let mut params = ActionParams::default();
params.address = address.clone();
params.gas = gas;
params.code = Some(Arc::new(code.clone()));
let mut params = ActionParams::default();
params.address = address.clone();
params.gas = gas;
params.code = Some(Arc::new(code.clone()));
let vm = factory.create(params, ext.schedule(), 0);
let vm = factory.create(params, ext.schedule(), 0);
result(vm.exec(&mut ext).ok().unwrap())
});
result(vm.exec(&mut ext).ok().unwrap())
});
}
fn blockhash_mulmod_small(b: &mut Criterion) {
b.bench_function("blockhash_mulmod_small", |b| {
b.bench_function("blockhash_mulmod_small", |b| {
let factory = Factory::default();
let mut ext = FakeExt::new();
@@ -177,7 +177,7 @@ fn blockhash_mulmod_small(b: &mut Criterion) {
}
fn blockhash_mulmod_large(b: &mut Criterion) {
b.bench_function("blockhash_mulmod_large", |b| {
b.bench_function("blockhash_mulmod_large", |b| {
let factory = Factory::default();
let mut ext = FakeExt::new();
@@ -201,9 +201,9 @@ fn blockhash_mulmod_large(b: &mut Criterion) {
}
fn result(r: Result<evm::GasLeft>) -> U256 {
match r {
Ok(GasLeft::Known(gas_left)) => gas_left,
Ok(GasLeft::NeedsReturn { gas_left, .. }) => gas_left,
_ => U256::zero(),
}
match r {
Ok(GasLeft::Known(gas_left)) => gas_left,
Ok(GasLeft::NeedsReturn { gas_left, .. }) => gas_left,
_ => U256::zero(),
}
}

View File

@@ -16,20 +16,20 @@
//! Evm interface.
use std::{ops, cmp, fmt};
use ethereum_types::{U128, U256, U512};
use vm::{Ext, Result, ReturnData, GasLeft, Error};
use std::{cmp, fmt, ops};
use vm::{Error, Ext, GasLeft, Result, ReturnData};
/// Finalization result. Gas Left: either it is a known value, or it needs to be computed by processing
/// a return instruction.
#[derive(Debug)]
pub struct FinalizationResult {
/// Final amount of gas left.
pub gas_left: U256,
/// Apply execution state changes or revert them.
pub apply_state: bool,
/// Return data buffer.
pub return_data: ReturnData,
/// Final amount of gas left.
pub gas_left: U256,
/// Apply execution state changes or revert them.
pub apply_state: bool,
/// Return data buffer.
pub return_data: ReturnData,
}
/// Types that can be "finalized" using an EVM.
@@ -37,177 +37,188 @@ pub struct FinalizationResult {
/// In practice, this is just used to define an inherent impl on
/// `Result<GasLeft<'a>>`.
pub trait Finalize {
/// Consume the externalities, call return if necessary, and produce call result.
fn finalize<E: Ext>(self, ext: E) -> Result<FinalizationResult>;
/// Consume the externalities, call return if necessary, and produce call result.
fn finalize<E: Ext>(self, ext: E) -> Result<FinalizationResult>;
}
impl Finalize for Result<GasLeft> {
fn finalize<E: Ext>(self, ext: E) -> Result<FinalizationResult> {
match self {
Ok(GasLeft::Known(gas_left)) => {
Ok(FinalizationResult {
gas_left,
apply_state: true,
return_data: ReturnData::empty()
})
},
Ok(GasLeft::NeedsReturn { gas_left, data, apply_state }) => {
ext.ret(&gas_left, &data, apply_state).map(|gas_left|
FinalizationResult { gas_left, apply_state, return_data: data }
)
},
Err(err) => Err(err),
}
}
fn finalize<E: Ext>(self, ext: E) -> Result<FinalizationResult> {
match self {
Ok(GasLeft::Known(gas_left)) => Ok(FinalizationResult {
gas_left,
apply_state: true,
return_data: ReturnData::empty(),
}),
Ok(GasLeft::NeedsReturn {
gas_left,
data,
apply_state,
}) => ext
.ret(&gas_left, &data, apply_state)
.map(|gas_left| FinalizationResult {
gas_left,
apply_state,
return_data: data,
}),
Err(err) => Err(err),
}
}
}
impl Finalize for Error {
fn finalize<E: Ext>(self, _ext: E) -> Result<FinalizationResult> {
Err(self)
}
fn finalize<E: Ext>(self, _ext: E) -> Result<FinalizationResult> {
Err(self)
}
}
/// Cost calculation type. For low-gas usage we calculate costs using usize instead of U256
pub trait CostType: Sized + From<usize> + Copy + Send
+ ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Output=Self> + ops::Sub<Output=Self>
+ ops::Shr<usize, Output=Self> + ops::Shl<usize, Output=Self>
+ cmp::Ord + fmt::Debug {
/// Converts this cost into `U256`
fn as_u256(&self) -> U256;
/// Tries to fit `U256` into this `Cost` type
fn from_u256(val: U256) -> Result<Self>;
/// Convert to usize (may panic)
fn as_usize(&self) -> usize;
/// Add with overflow
fn overflow_add(self, other: Self) -> (Self, bool);
/// Multiple with overflow
fn overflow_mul(self, other: Self) -> (Self, bool);
/// Single-step full multiplication and shift: `(self*other) >> shr`
/// Should not overflow on intermediate steps
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool);
pub trait CostType:
Sized
+ From<usize>
+ Copy
+ Send
+ ops::Mul<Output = Self>
+ ops::Div<Output = Self>
+ ops::Add<Output = Self>
+ ops::Sub<Output = Self>
+ ops::Shr<usize, Output = Self>
+ ops::Shl<usize, Output = Self>
+ cmp::Ord
+ fmt::Debug
{
/// Converts this cost into `U256`
fn as_u256(&self) -> U256;
/// Tries to fit `U256` into this `Cost` type
fn from_u256(val: U256) -> Result<Self>;
/// Convert to usize (may panic)
fn as_usize(&self) -> usize;
/// Add with overflow
fn overflow_add(self, other: Self) -> (Self, bool);
/// Multiply with overflow
fn overflow_mul(self, other: Self) -> (Self, bool);
/// Single-step full multiplication and shift: `(self*other) >> shr`
/// Should not overflow on intermediate steps
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool);
}
impl CostType for U256 {
fn as_u256(&self) -> U256 {
*self
}
fn as_u256(&self) -> U256 {
*self
}
fn from_u256(val: U256) -> Result<Self> {
Ok(val)
}
fn from_u256(val: U256) -> Result<Self> {
Ok(val)
}
fn as_usize(&self) -> usize {
self.as_u64() as usize
}
fn as_usize(&self) -> usize {
self.as_u64() as usize
}
fn overflow_add(self, other: Self) -> (Self, bool) {
self.overflowing_add(other)
}
fn overflow_add(self, other: Self) -> (Self, bool) {
self.overflowing_add(other)
}
fn overflow_mul(self, other: Self) -> (Self, bool) {
self.overflowing_mul(other)
}
fn overflow_mul(self, other: Self) -> (Self, bool) {
self.overflowing_mul(other)
}
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) {
let x = self.full_mul(other);
let U512(parts) = x;
let overflow = (parts[4] | parts[5] | parts[6] | parts[7]) > 0;
let U512(parts) = x >> shr;
(
U256([parts[0], parts[1], parts[2], parts[3]]),
overflow
)
}
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) {
let x = self.full_mul(other);
let U512(parts) = x;
let overflow = (parts[4] | parts[5] | parts[6] | parts[7]) > 0;
let U512(parts) = x >> shr;
(U256([parts[0], parts[1], parts[2], parts[3]]), overflow)
}
}
impl CostType for usize {
fn as_u256(&self) -> U256 {
U256::from(*self)
}
fn as_u256(&self) -> U256 {
U256::from(*self)
}
fn from_u256(val: U256) -> Result<Self> {
let res = val.low_u64() as usize;
fn from_u256(val: U256) -> Result<Self> {
let res = val.low_u64() as usize;
// validate if value fits into usize
if U256::from(res) != val {
return Err(Error::OutOfGas);
}
// validate if value fits into usize
if U256::from(res) != val {
return Err(Error::OutOfGas);
}
Ok(res)
}
Ok(res)
}
fn as_usize(&self) -> usize {
*self
}
fn as_usize(&self) -> usize {
*self
}
fn overflow_add(self, other: Self) -> (Self, bool) {
self.overflowing_add(other)
}
fn overflow_add(self, other: Self) -> (Self, bool) {
self.overflowing_add(other)
}
fn overflow_mul(self, other: Self) -> (Self, bool) {
self.overflowing_mul(other)
}
fn overflow_mul(self, other: Self) -> (Self, bool) {
self.overflowing_mul(other)
}
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) {
let (c, o) = U128::from(self).overflowing_mul(U128::from(other));
let U128(parts) = c;
let overflow = o | (parts[1] > 0);
let U128(parts) = c >> shr;
let result = parts[0] as usize;
let overflow = overflow | (parts[0] > result as u64);
(result, overflow)
}
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) {
let (c, o) = U128::from(self).overflowing_mul(U128::from(other));
let U128(parts) = c;
let overflow = o | (parts[1] > 0);
let U128(parts) = c >> shr;
let result = parts[0] as usize;
let overflow = overflow | (parts[0] > result as u64);
(result, overflow)
}
}
#[cfg(test)]
mod tests {
use ethereum_types::U256;
use super::CostType;
use super::CostType;
use ethereum_types::U256;
#[test]
fn should_calculate_overflow_mul_shr_without_overflow() {
// given
let num = 1048576;
#[test]
fn should_calculate_overflow_mul_shr_without_overflow() {
// given
let num = 1048576;
// when
let (res1, o1) = U256::from(num).overflow_mul_shr(U256::from(num), 20);
let (res2, o2) = num.overflow_mul_shr(num, 20);
// when
let (res1, o1) = U256::from(num).overflow_mul_shr(U256::from(num), 20);
let (res2, o2) = num.overflow_mul_shr(num, 20);
// then
assert_eq!(res1, U256::from(num));
assert!(!o1);
assert_eq!(res2, num);
assert!(!o2);
}
// then
assert_eq!(res1, U256::from(num));
assert!(!o1);
assert_eq!(res2, num);
assert!(!o2);
}
#[test]
fn should_calculate_overflow_mul_shr_with_overflow() {
// given
let max = u64::max_value();
let num1 = U256([max, max, max, max]);
let num2 = usize::max_value();
#[test]
fn should_calculate_overflow_mul_shr_with_overflow() {
// given
let max = u64::max_value();
let num1 = U256([max, max, max, max]);
let num2 = usize::max_value();
// when
let (res1, o1) = num1.overflow_mul_shr(num1, 256);
let (res2, o2) = num2.overflow_mul_shr(num2, 64);
// when
let (res1, o1) = num1.overflow_mul_shr(num1, 256);
let (res2, o2) = num2.overflow_mul_shr(num2, 64);
// then
assert_eq!(res2, num2 - 1);
assert!(o2);
// then
assert_eq!(res2, num2 - 1);
assert!(o2);
assert_eq!(res1, !U256::zero() - U256::one());
assert!(o1);
}
assert_eq!(res1, !U256::zero() - U256::one());
assert!(o1);
}
#[test]
fn should_validate_u256_to_usize_conversion() {
// given
let v = U256::from(usize::max_value()) + U256::from(1);
#[test]
fn should_validate_u256_to_usize_conversion() {
// given
let v = U256::from(usize::max_value()) + U256::from(1);
// when
let res = usize::from_u256(v);
// when
let res = usize::from_u256(v);
// then
assert!(res.is_err());
}
// then
assert!(res.is_err());
}
}
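A sketch (illustrative, not from the original sources) of the Finalize impl above, using vm's FakeExt test helper as the externalities; a known gas value finalizes without calling back into the externalities.

use ethereum_types::U256;
use vm::{tests::FakeExt, GasLeft};

fn finalize_sketch() {
    let ext = FakeExt::new();

    let res: vm::Result<GasLeft> = Ok(GasLeft::Known(U256::from(21_000)));
    let finalized = res.finalize(ext).expect("known gas always finalizes");

    // No RETURN was processed, so state changes apply and the full gas value survives.
    assert!(finalized.apply_state);
    assert_eq!(finalized.gas_left, U256::from(21_000));
}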

View File

@@ -16,67 +16,76 @@
//! Evm factory.
//!
use super::{interpreter::SharedCache, vm::ActionParams, vmtype::VMType};
use ethereum_types::U256;
use std::sync::Arc;
use vm::{Exec, Schedule};
use ethereum_types::U256;
use super::vm::ActionParams;
use super::interpreter::SharedCache;
use super::vmtype::VMType;
/// Evm factory. Creates appropriate Evm.
#[derive(Clone)]
pub struct Factory {
evm: VMType,
evm_cache: Arc<SharedCache>,
evm: VMType,
evm_cache: Arc<SharedCache>,
}
impl Factory {
/// Create fresh instance of VM
/// Might choose implementation depending on supplied gas.
pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box<Exec> {
match self.evm {
VMType::Interpreter => if Self::can_fit_in_usize(&params.gas) {
Box::new(super::interpreter::Interpreter::<usize>::new(params, self.evm_cache.clone(), schedule, depth))
} else {
Box::new(super::interpreter::Interpreter::<U256>::new(params, self.evm_cache.clone(), schedule, depth))
}
}
}
/// Create fresh instance of VM
/// Might choose implementation depending on supplied gas.
pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box<Exec> {
match self.evm {
VMType::Interpreter => {
if Self::can_fit_in_usize(&params.gas) {
Box::new(super::interpreter::Interpreter::<usize>::new(
params,
self.evm_cache.clone(),
schedule,
depth,
))
} else {
Box::new(super::interpreter::Interpreter::<U256>::new(
params,
self.evm_cache.clone(),
schedule,
depth,
))
}
}
}
}
/// Create new instance of specific `VMType` factory, with a size in bytes
/// for caching jump destinations.
pub fn new(evm: VMType, cache_size: usize) -> Self {
Factory {
evm,
evm_cache: Arc::new(SharedCache::new(cache_size)),
}
}
/// Create new instance of specific `VMType` factory, with a size in bytes
/// for caching jump destinations.
pub fn new(evm: VMType, cache_size: usize) -> Self {
Factory {
evm,
evm_cache: Arc::new(SharedCache::new(cache_size)),
}
}
fn can_fit_in_usize(gas: &U256) -> bool {
gas == &U256::from(gas.low_u64() as usize)
}
fn can_fit_in_usize(gas: &U256) -> bool {
gas == &U256::from(gas.low_u64() as usize)
}
}
impl Default for Factory {
/// Returns native rust evm factory
fn default() -> Factory {
Factory {
evm: VMType::Interpreter,
evm_cache: Arc::new(SharedCache::default()),
}
}
/// Returns native rust evm factory
fn default() -> Factory {
Factory {
evm: VMType::Interpreter,
evm_cache: Arc::new(SharedCache::default()),
}
}
}
#[test]
fn test_create_vm() {
use vm::Ext;
use vm::tests::FakeExt;
use bytes::Bytes;
use bytes::Bytes;
use vm::{tests::FakeExt, Ext};
let mut params = ActionParams::default();
params.code = Some(Arc::new(Bytes::default()));
let ext = FakeExt::new();
let _vm = Factory::default().create(params, ext.schedule(), ext.depth());
let mut params = ActionParams::default();
params.code = Some(Arc::new(Bytes::default()));
let ext = FakeExt::new();
let _vm = Factory::default().create(params, ext.schedule(), ext.depth());
}
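A sketch (illustrative, not from the original sources) of the gas-dependent choice documented on Factory::create above: gas that fits in a usize selects the usize-cost interpreter, anything larger the U256 one. The cache size here is arbitrary.

use ethereum_types::U256;
use vm::tests::FakeExt;

fn factory_gas_choice_sketch() {
    let factory = Factory::new(VMType::Interpreter, 32 * 1024);
    let ext = FakeExt::new();

    let mut small = ActionParams::default();
    small.gas = U256::from(100_000); // fits in usize -> Interpreter<usize>
    let _vm_small = factory.create(small, ext.schedule(), 0);

    let mut large = ActionParams::default();
    large.gas = !U256::zero(); // does not fit -> Interpreter<U256>
    let _vm_large = factory.create(large, ext.schedule(), 0);
}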
/// Create tests by injecting different VM factories

File diff suppressed because it is too large

View File

@@ -14,475 +14,503 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::cmp;
use ethereum_types::{U256, H256};
use super::u256_to_address;
use ethereum_types::{H256, U256};
use std::cmp;
use {evm, vm};
use evm;
use instructions::{self, Instruction, InstructionInfo};
use interpreter::stack::Stack;
use vm::Schedule;
use vm::{self, Schedule};
macro_rules! overflowing {
($x: expr) => {{
let (v, overflow) = $x;
if overflow { return Err(vm::Error::OutOfGas); }
v
}}
($x: expr) => {{
let (v, overflow) = $x;
if overflow {
return Err(vm::Error::OutOfGas);
}
v
}};
}
enum Request<Cost: ::evm::CostType> {
Gas(Cost),
GasMem(Cost, Cost),
GasMemProvide(Cost, Cost, Option<U256>),
GasMemCopy(Cost, Cost, Cost)
Gas(Cost),
GasMem(Cost, Cost),
GasMemProvide(Cost, Cost, Option<U256>),
GasMemCopy(Cost, Cost, Cost),
}
pub struct InstructionRequirements<Cost> {
pub gas_cost: Cost,
pub provide_gas: Option<Cost>,
pub memory_total_gas: Cost,
pub memory_required_size: usize,
pub gas_cost: Cost,
pub provide_gas: Option<Cost>,
pub memory_total_gas: Cost,
pub memory_required_size: usize,
}
pub struct Gasometer<Gas> {
pub current_gas: Gas,
pub current_mem_gas: Gas,
pub current_gas: Gas,
pub current_mem_gas: Gas,
}
impl<Gas: evm::CostType> Gasometer<Gas> {
pub fn new(current_gas: Gas) -> Self {
Gasometer {
current_gas: current_gas,
current_mem_gas: Gas::from(0),
}
}
pub fn new(current_gas: Gas) -> Self {
Gasometer {
current_gas: current_gas,
current_mem_gas: Gas::from(0),
}
}
pub fn verify_gas(&self, gas_cost: &Gas) -> vm::Result<()> {
match &self.current_gas < gas_cost {
true => Err(vm::Error::OutOfGas),
false => Ok(()),
}
}
pub fn verify_gas(&self, gas_cost: &Gas) -> vm::Result<()> {
match &self.current_gas < gas_cost {
true => Err(vm::Error::OutOfGas),
false => Ok(())
}
}
/// How much gas is provided to a CALL/CREATE, given that we need to deduct `needed` for this operation
/// and that we `requested` some.
pub fn gas_provided(
&self,
schedule: &Schedule,
needed: Gas,
requested: Option<U256>,
) -> vm::Result<Gas> {
// Try converting requested gas to `Gas` (`U256/u64`)
// but in EIP150 even if we request more we should never fail from OOG
let requested = requested.map(Gas::from_u256);
/// How much gas is provided to a CALL/CREATE, given that we need to deduct `needed` for this operation
/// and that we `requested` some.
pub fn gas_provided(&self, schedule: &Schedule, needed: Gas, requested: Option<U256>) -> vm::Result<Gas> {
// Try converting requested gas to `Gas` (`U256/u64`)
// but in EIP150 even if we request more we should never fail from OOG
let requested = requested.map(Gas::from_u256);
match schedule.sub_gas_cap_divisor {
Some(cap_divisor) if self.current_gas >= needed => {
let gas_remaining = self.current_gas - needed;
let max_gas_provided = match cap_divisor {
64 => gas_remaining - (gas_remaining >> 6),
cap_divisor => gas_remaining - gas_remaining / Gas::from(cap_divisor),
};
match schedule.sub_gas_cap_divisor {
Some(cap_divisor) if self.current_gas >= needed => {
let gas_remaining = self.current_gas - needed;
let max_gas_provided = match cap_divisor {
64 => gas_remaining - (gas_remaining >> 6),
cap_divisor => gas_remaining - gas_remaining / Gas::from(cap_divisor),
};
if let Some(Ok(r)) = requested {
Ok(cmp::min(r, max_gas_provided))
} else {
Ok(max_gas_provided)
}
}
_ => {
if let Some(r) = requested {
r
} else if self.current_gas >= needed {
Ok(self.current_gas - needed)
} else {
Ok(0.into())
}
}
}
}
if let Some(Ok(r)) = requested {
Ok(cmp::min(r, max_gas_provided))
} else {
Ok(max_gas_provided)
}
},
_ => {
if let Some(r) = requested {
r
} else if self.current_gas >= needed {
Ok(self.current_gas - needed)
} else {
Ok(0.into())
}
},
}
}
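A worked sketch (illustrative, not from the original sources) of the cap computed above: with sub_gas_cap_divisor = 64, at most remaining - remaining/64 (i.e. 63/64) of the gas left after deducting the operation's own cost can be forwarded to the child call.

fn gas_cap_sketch() {
    let current_gas: u64 = 100_000;
    let needed: u64 = 700; // illustrative cost of the CALL itself
    let remaining = current_gas - needed; // 99_300
    let max_provided = remaining - (remaining >> 6); // same as remaining - remaining / 64
    assert_eq!(max_provided, 97_749);
}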
/// Determine how much gas is used by the given instruction, given the machine's state.
///
/// We guarantee that the final element of the returned tuple (`provided`) will be `Some`
/// iff the `instruction` is one of `CREATE`, or any of the `CALL` variants. In this case,
/// it will be the amount of gas that the current context provides to the child context.
pub fn requirements(
&mut self,
ext: &vm::Ext,
instruction: Instruction,
info: &InstructionInfo,
stack: &Stack<U256>,
current_mem_size: usize,
) -> vm::Result<InstructionRequirements<Gas>> {
let schedule = ext.schedule();
let tier = info.tier.idx();
let default_gas = Gas::from(schedule.tier_step_gas[tier]);
/// Determine how much gas is used by the given instruction, given the machine's state.
///
/// We guarantee that the final element of the returned tuple (`provided`) will be `Some`
/// iff the `instruction` is one of `CREATE`, or any of the `CALL` variants. In this case,
/// it will be the amount of gas that the current context provides to the child context.
pub fn requirements(
&mut self,
ext: &vm::Ext,
instruction: Instruction,
info: &InstructionInfo,
stack: &Stack<U256>,
current_mem_size: usize,
) -> vm::Result<InstructionRequirements<Gas>> {
let schedule = ext.schedule();
let tier = info.tier.idx();
let default_gas = Gas::from(schedule.tier_step_gas[tier]);
let cost = match instruction {
instructions::JUMPDEST => Request::Gas(Gas::from(1)),
instructions::SSTORE => {
if schedule.eip1706 && self.current_gas <= Gas::from(schedule.call_stipend) {
return Err(vm::Error::OutOfGas);
}
let address = H256::from(stack.peek(0));
let newval = stack.peek(1);
let val = U256::from(&*ext.storage_at(&address)?);
let cost = match instruction {
instructions::JUMPDEST => {
Request::Gas(Gas::from(1))
},
instructions::SSTORE => {
if schedule.eip1706 && self.current_gas <= Gas::from(schedule.call_stipend) {
return Err(vm::Error::OutOfGas);
}
let address = H256::from(stack.peek(0));
let newval = stack.peek(1);
let val = U256::from(&*ext.storage_at(&address)?);
let gas = if schedule.eip1283 {
let orig = U256::from(&*ext.initial_storage_at(&address)?);
calculate_eip1283_sstore_gas(schedule, &orig, &val, &newval)
} else {
if val.is_zero() && !newval.is_zero() {
schedule.sstore_set_gas
} else {
// Refund for below case is added when actually executing sstore
// !is_zero(&val) && is_zero(newval)
schedule.sstore_reset_gas
}
};
Request::Gas(Gas::from(gas))
}
instructions::SLOAD => Request::Gas(Gas::from(schedule.sload_gas)),
instructions::BALANCE => Request::Gas(Gas::from(schedule.balance_gas)),
instructions::EXTCODESIZE => Request::Gas(Gas::from(schedule.extcodesize_gas)),
instructions::EXTCODEHASH => Request::Gas(Gas::from(schedule.extcodehash_gas)),
instructions::SUICIDE => {
let mut gas = Gas::from(schedule.suicide_gas);
let gas = if schedule.eip1283 {
let orig = U256::from(&*ext.initial_storage_at(&address)?);
calculate_eip1283_sstore_gas(schedule, &orig, &val, &newval)
} else {
if val.is_zero() && !newval.is_zero() {
schedule.sstore_set_gas
} else {
// Refund for below case is added when actually executing sstore
// !is_zero(&val) && is_zero(newval)
schedule.sstore_reset_gas
}
};
Request::Gas(Gas::from(gas))
},
instructions::SLOAD => {
Request::Gas(Gas::from(schedule.sload_gas))
},
instructions::BALANCE => {
Request::Gas(Gas::from(schedule.balance_gas))
},
instructions::EXTCODESIZE => {
Request::Gas(Gas::from(schedule.extcodesize_gas))
},
instructions::EXTCODEHASH => {
Request::Gas(Gas::from(schedule.extcodehash_gas))
},
instructions::SUICIDE => {
let mut gas = Gas::from(schedule.suicide_gas);
let is_value_transfer = !ext.origin_balance()?.is_zero();
let address = u256_to_address(stack.peek(0));
if (!schedule.no_empty && !ext.exists(&address)?)
|| (schedule.no_empty
&& is_value_transfer
&& !ext.exists_and_not_null(&address)?)
{
gas =
overflowing!(gas.overflow_add(schedule.suicide_to_new_account_cost.into()));
}
let is_value_transfer = !ext.origin_balance()?.is_zero();
let address = u256_to_address(stack.peek(0));
if (
!schedule.no_empty && !ext.exists(&address)?
) || (
schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)?
) {
gas = overflowing!(gas.overflow_add(schedule.suicide_to_new_account_cost.into()));
}
Request::Gas(gas)
}
instructions::MSTORE | instructions::MLOAD => {
Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 32)?)
}
instructions::MSTORE8 => {
Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 1)?)
}
instructions::RETURN | instructions::REVERT => {
Request::GasMem(default_gas, mem_needed(stack.peek(0), stack.peek(1))?)
}
instructions::SHA3 => {
let words = overflowing!(to_word_size(Gas::from_u256(*stack.peek(1))?));
let gas = overflowing!(Gas::from(schedule.sha3_gas).overflow_add(overflowing!(
Gas::from(schedule.sha3_word_gas).overflow_mul(words)
)));
Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?)
}
instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => {
Request::GasMemCopy(
default_gas,
mem_needed(stack.peek(0), stack.peek(2))?,
Gas::from_u256(*stack.peek(2))?,
)
}
instructions::EXTCODECOPY => Request::GasMemCopy(
schedule.extcodecopy_base_gas.into(),
mem_needed(stack.peek(1), stack.peek(3))?,
Gas::from_u256(*stack.peek(3))?,
),
instructions::LOG0
| instructions::LOG1
| instructions::LOG2
| instructions::LOG3
| instructions::LOG4 => {
let no_of_topics = instruction
.log_topics()
.expect("log_topics always return some for LOG* instructions; qed");
let log_gas = schedule.log_gas + schedule.log_topic_gas * no_of_topics;
Request::Gas(gas)
},
instructions::MSTORE | instructions::MLOAD => {
Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 32)?)
},
instructions::MSTORE8 => {
Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 1)?)
},
instructions::RETURN | instructions::REVERT => {
Request::GasMem(default_gas, mem_needed(stack.peek(0), stack.peek(1))?)
},
instructions::SHA3 => {
let words = overflowing!(to_word_size(Gas::from_u256(*stack.peek(1))?));
let gas = overflowing!(Gas::from(schedule.sha3_gas).overflow_add(overflowing!(Gas::from(schedule.sha3_word_gas).overflow_mul(words))));
Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?)
},
instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => {
Request::GasMemCopy(default_gas, mem_needed(stack.peek(0), stack.peek(2))?, Gas::from_u256(*stack.peek(2))?)
},
instructions::EXTCODECOPY => {
Request::GasMemCopy(schedule.extcodecopy_base_gas.into(), mem_needed(stack.peek(1), stack.peek(3))?, Gas::from_u256(*stack.peek(3))?)
},
instructions::LOG0 | instructions::LOG1 | instructions::LOG2 | instructions::LOG3 | instructions::LOG4 => {
let no_of_topics = instruction.log_topics().expect("log_topics always return some for LOG* instructions; qed");
let log_gas = schedule.log_gas + schedule.log_topic_gas * no_of_topics;
let data_gas =
overflowing!(Gas::from_u256(*stack.peek(1))?
.overflow_mul(Gas::from(schedule.log_data_gas)));
let gas = overflowing!(data_gas.overflow_add(Gas::from(log_gas)));
Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?)
}
instructions::CALL | instructions::CALLCODE => {
let mut gas = Gas::from(schedule.call_gas);
let mem = cmp::max(
mem_needed(stack.peek(5), stack.peek(6))?,
mem_needed(stack.peek(3), stack.peek(4))?,
);
let data_gas = overflowing!(Gas::from_u256(*stack.peek(1))?.overflow_mul(Gas::from(schedule.log_data_gas)));
let gas = overflowing!(data_gas.overflow_add(Gas::from(log_gas)));
Request::GasMem(gas, mem_needed(stack.peek(0), stack.peek(1))?)
},
instructions::CALL | instructions::CALLCODE => {
let mut gas = Gas::from(schedule.call_gas);
let mem = cmp::max(
mem_needed(stack.peek(5), stack.peek(6))?,
mem_needed(stack.peek(3), stack.peek(4))?
);
let address = u256_to_address(stack.peek(1));
let is_value_transfer = !stack.peek(2).is_zero();
let address = u256_to_address(stack.peek(1));
let is_value_transfer = !stack.peek(2).is_zero();
if instruction == instructions::CALL
&& ((!schedule.no_empty && !ext.exists(&address)?)
|| (schedule.no_empty
&& is_value_transfer
&& !ext.exists_and_not_null(&address)?))
{
gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into()));
                }

                if instruction == instructions::CALL
                    && ((!schedule.no_empty && !ext.exists(&address)?)
                        || (schedule.no_empty
                            && is_value_transfer
                            && !ext.exists_and_not_null(&address)?))
                {
                    gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into()));
                }

                if is_value_transfer {
                    gas = overflowing!(gas.overflow_add(schedule.call_value_transfer_gas.into()));
                }

                let requested = *stack.peek(0);

                Request::GasMemProvide(gas, mem, Some(requested))
            }
            instructions::DELEGATECALL | instructions::STATICCALL => {
                let gas = Gas::from(schedule.call_gas);
                let mem = cmp::max(
                    mem_needed(stack.peek(4), stack.peek(5))?,
                    mem_needed(stack.peek(2), stack.peek(3))?,
                );
                let requested = *stack.peek(0);

                Request::GasMemProvide(gas, mem, Some(requested))
            }
            instructions::CREATE => {
                let start = stack.peek(1);
                let len = stack.peek(2);

                let gas = Gas::from(schedule.create_gas);
                let mem = mem_needed(start, len)?;

                Request::GasMemProvide(gas, mem, None)
            }
            instructions::CREATE2 => {
                let start = stack.peek(1);
                let len = stack.peek(2);

                let base = Gas::from(schedule.create_gas);
                let word = overflowing!(to_word_size(Gas::from_u256(*len)?));
                let word_gas = overflowing!(Gas::from(schedule.sha3_word_gas).overflow_mul(word));
                let gas = overflowing!(base.overflow_add(word_gas));
                let mem = mem_needed(start, len)?;

                Request::GasMemProvide(gas, mem, None)
            }
            instructions::EXP => {
                let expon = stack.peek(1);
                let bytes = ((expon.bits() + 7) / 8) as usize;
                let gas = Gas::from(schedule.exp_gas + schedule.exp_byte_gas * bytes);
                Request::Gas(gas)
            }
            instructions::BLOCKHASH => Request::Gas(Gas::from(schedule.blockhash_gas)),
            _ => Request::Gas(default_gas),
        };

        Ok(match cost {
            Request::Gas(gas) => InstructionRequirements {
                gas_cost: gas,
                provide_gas: None,
                memory_required_size: 0,
                memory_total_gas: self.current_mem_gas,
            },
            Request::GasMem(gas, mem_size) => {
                let (mem_gas_cost, new_mem_gas, new_mem_size) =
                    self.mem_gas_cost(schedule, current_mem_size, &mem_size)?;
                let gas = overflowing!(gas.overflow_add(mem_gas_cost));
                InstructionRequirements {
                    gas_cost: gas,
                    provide_gas: None,
                    memory_required_size: new_mem_size,
                    memory_total_gas: new_mem_gas,
                }
            }
            Request::GasMemProvide(gas, mem_size, requested) => {
                let (mem_gas_cost, new_mem_gas, new_mem_size) =
                    self.mem_gas_cost(schedule, current_mem_size, &mem_size)?;
                let gas = overflowing!(gas.overflow_add(mem_gas_cost));
                let provided = self.gas_provided(schedule, gas, requested)?;
                let total_gas = overflowing!(gas.overflow_add(provided));

                InstructionRequirements {
                    gas_cost: total_gas,
                    provide_gas: Some(provided),
                    memory_required_size: new_mem_size,
                    memory_total_gas: new_mem_gas,
                }
            }
            Request::GasMemCopy(gas, mem_size, copy) => {
                let (mem_gas_cost, new_mem_gas, new_mem_size) =
                    self.mem_gas_cost(schedule, current_mem_size, &mem_size)?;
                let copy = overflowing!(to_word_size(copy));
                let copy_gas = overflowing!(Gas::from(schedule.copy_gas).overflow_mul(copy));
                let gas = overflowing!(gas.overflow_add(copy_gas));
                let gas = overflowing!(gas.overflow_add(mem_gas_cost));

                InstructionRequirements {
                    gas_cost: gas,
                    provide_gas: None,
                    memory_required_size: new_mem_size,
                    memory_total_gas: new_mem_gas,
                }
            }
        })
    }
    fn mem_gas_cost(
        &self,
        schedule: &Schedule,
        current_mem_size: usize,
        mem_size: &Gas,
    ) -> vm::Result<(Gas, Gas, usize)> {
        let gas_for_mem = |mem_size: Gas| {
            let s = mem_size >> 5;
            // s * memory_gas + s * s / quad_coeff_div
            let a = overflowing!(s.overflow_mul(Gas::from(schedule.memory_gas)));

            // Calculate s*s/quad_coeff_div
            assert_eq!(schedule.quad_coeff_div, 512);
            let b = overflowing!(s.overflow_mul_shr(s, 9));
            Ok(overflowing!(a.overflow_add(b)))
        };

        let current_mem_size = Gas::from(current_mem_size);
        let req_mem_size_rounded = overflowing!(to_word_size(*mem_size)) << 5;

        let (mem_gas_cost, new_mem_gas) = if req_mem_size_rounded > current_mem_size {
            let new_mem_gas = gas_for_mem(req_mem_size_rounded)?;
            (new_mem_gas - self.current_mem_gas, new_mem_gas)
        } else {
            (Gas::from(0), self.current_mem_gas)
        };

        Ok((mem_gas_cost, new_mem_gas, req_mem_size_rounded.as_usize()))
    }
}
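// A standalone sketch of the memory expansion cost computed by `mem_gas_cost`
// above, using plain u64 instead of the crate's `Gas` type. The constants
// MEMORY_GAS = 3 and QUAD_COEFF_DIV = 512 are assumptions consistent with the
// `assert_eq!(schedule.quad_coeff_div, 512)` above and with the expectations
// in `test_calculate_mem_cost` below; treat this as illustrative only.
#[cfg(test)]
mod mem_gas_sketch {
    const MEMORY_GAS: u64 = 3;
    const QUAD_COEFF_DIV: u64 = 512;

    // gas(words) = words * MEMORY_GAS + words^2 / QUAD_COEFF_DIV
    fn gas_for_mem_bytes(bytes: u64) -> u64 {
        let words = (bytes + 31) / 32;
        words * MEMORY_GAS + words * words / QUAD_COEFF_DIV
    }

    #[test]
    fn expansion_cost_grows_quadratically() {
        // Expanding from nothing to one word costs 3 gas, matching test_calculate_mem_cost.
        assert_eq!(gas_for_mem_bytes(32), 3);
        // The quadratic term only matters for large sizes: 1024 words = 32 KiB.
        assert_eq!(gas_for_mem_bytes(32 * 1024), 1024 * 3 + 1024 * 1024 / 512);
    }
}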
#[inline]
fn mem_needed_const<Gas: evm::CostType>(mem: &U256, add: usize) -> vm::Result<Gas> {
    Gas::from_u256(overflowing!(mem.overflowing_add(U256::from(add))))
}

#[inline]
fn mem_needed<Gas: evm::CostType>(offset: &U256, size: &U256) -> vm::Result<Gas> {
    if size.is_zero() {
        return Ok(Gas::from(0));
    }

    Gas::from_u256(overflowing!(offset.overflowing_add(*size)))
}

#[inline]
fn add_gas_usize<Gas: evm::CostType>(value: Gas, num: usize) -> (Gas, bool) {
    value.overflow_add(Gas::from(num))
}

#[inline]
fn to_word_size<Gas: evm::CostType>(value: Gas) -> (Gas, bool) {
    let (gas, overflow) = add_gas_usize(value, 31);
    if overflow {
        return (gas, overflow);
    }

    (gas >> 5, false)
}
#[inline]
fn calculate_eip1283_sstore_gas<Gas: evm::CostType>(
    schedule: &Schedule,
    original: &U256,
    current: &U256,
    new: &U256,
) -> Gas {
    Gas::from(if current == new {
        // 1. If current value equals new value (this is a no-op), 200 gas is deducted.
        schedule.sload_gas
    } else {
        // 2. If current value does not equal new value
        if original == current {
            // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context)
            if original.is_zero() {
                // 2.1.1. If original value is 0, 20000 gas is deducted.
                schedule.sstore_set_gas
            } else {
                // 2.1.2. Otherwise, 5000 gas is deducted.
                schedule.sstore_reset_gas

                // 2.1.2.1. If new value is 0, add 15000 gas to refund counter.
            }
        } else {
            // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses.
            schedule.sload_gas

            // 2.2.1. If original value is not 0
            // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0.
            // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter.

            // 2.2.2. If original value equals new value (this storage slot is reset)
            // 2.2.2.1. If original value is 0, add 19800 gas to refund counter.
            // 2.2.2.2. Otherwise, add 4800 gas to refund counter.
        }
    })
}
pub fn handle_eip1283_sstore_clears_refund(
    ext: &mut vm::Ext,
    original: &U256,
    current: &U256,
    new: &U256,
) {
    let sstore_clears_schedule = ext.schedule().sstore_refund_gas;

    if current == new {
        // 1. If current value equals new value (this is a no-op), 200 gas is deducted.
    } else {
        // 2. If current value does not equal new value
        if original == current {
            // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context)
            if original.is_zero() {
                // 2.1.1. If original value is 0, 20000 gas is deducted.
            } else {
                // 2.1.2. Otherwise, 5000 gas is deducted.
                if new.is_zero() {
                    // 2.1.2.1. If new value is 0, add 15000 gas to refund counter.
                    ext.add_sstore_refund(sstore_clears_schedule);
                }
            }
        } else {
            // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses.
            if !original.is_zero() {
                // 2.2.1. If original value is not 0
                if current.is_zero() {
                    // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0.
                    ext.sub_sstore_refund(sstore_clears_schedule);
                } else if new.is_zero() {
                    // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter.
                    ext.add_sstore_refund(sstore_clears_schedule);
                }
            }

            if original == new {
                // 2.2.2. If original value equals new value (this storage slot is reset)
                if original.is_zero() {
                    // 2.2.2.1. If original value is 0, add 19800 gas to refund counter.
                    let refund = ext.schedule().sstore_set_gas - ext.schedule().sload_gas;
                    ext.add_sstore_refund(refund);
                } else {
                    // 2.2.2.2. Otherwise, add 4800 gas to refund counter.
                    let refund = ext.schedule().sstore_reset_gas - ext.schedule().sload_gas;
                    ext.add_sstore_refund(refund);
                }
            }
        }
    }
}
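// Companion sketch to `handle_eip1283_sstore_clears_refund`: only the refund
// adjustments, expressed as a signed delta over plain u64 values. The 15000 /
// 19800 / 4800 figures are taken from the comments above (sstore_refund_gas,
// sstore_set_gas - sload_gas, sstore_reset_gas - sload_gas in the default
// schedule); this is illustrative, not the crate's API.
#[cfg(test)]
mod eip1283_refund_sketch {
    fn refund_delta(original: u64, current: u64, new: u64) -> i64 {
        if current == new {
            0
        } else if original == current {
            if original != 0 && new == 0 {
                15000
            } else {
                0
            }
        } else {
            let mut delta = 0i64;
            if original != 0 {
                if current == 0 {
                    delta -= 15000;
                } else if new == 0 {
                    delta += 15000;
                }
            }
            if original == new {
                delta += if original == 0 { 19800 } else { 4800 };
            }
            delta
        }
    }

    #[test]
    fn refunds_follow_the_decision_tree() {
        assert_eq!(refund_delta(1, 1, 0), 15000); // clearing a fresh slot
        assert_eq!(refund_delta(1, 0, 2), -15000); // un-clearing a dirty slot
        assert_eq!(refund_delta(0, 1, 0), 19800); // resetting back to zero
        assert_eq!(refund_delta(2, 1, 2), 4800); // resetting to original non-zero value
    }
}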
#[test]
fn test_mem_gas_cost() {
    // given
    let gasometer = Gasometer::<U256>::new(U256::zero());
    let schedule = Schedule::default();
    let current_mem_size = 5;
    let mem_size = !U256::zero();

    // when
    let result = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size);

    // then
    if result.is_ok() {
        assert!(false, "Should fail with OutOfGas");
    }
}

#[test]
fn test_calculate_mem_cost() {
    // given
    let gasometer = Gasometer::<usize>::new(0);
    let schedule = Schedule::default();
    let current_mem_size = 0;
    let mem_size = 5;

    // when
    let (mem_cost, new_mem_gas, mem_size) = gasometer
        .mem_gas_cost(&schedule, current_mem_size, &mem_size)
        .unwrap();

    // then
    assert_eq!(mem_cost, 3);
    assert_eq!(new_mem_gas, 3);
    assert_eq!(mem_size, 32);
}

View File

@ -19,144 +19,156 @@ pub use self::inner::*;
#[macro_use]
#[cfg(not(feature = "evm-debug"))]
mod inner {
    macro_rules! evm_debug {
        ($x: expr) => {};
    }

    pub struct EvmInformant;
    impl EvmInformant {
        pub fn new(_depth: usize) -> Self {
            EvmInformant {}
        }
        pub fn done(&mut self) {}
    }
}

#[macro_use]
#[cfg(feature = "evm-debug")]
mod inner {
    use std::{
        collections::HashMap,
        iter,
        time::{Duration, Instant},
    };

    use ethereum_types::U256;

    use instructions::{Instruction, InstructionInfo};
    use interpreter::stack::Stack;
    use CostType;

    macro_rules! evm_debug {
        ($x: expr) => {
            $x
        };
    }

    fn print(data: String) {
        if cfg!(feature = "evm-debug-tests") {
            println!("{}", data);
        } else {
            debug!(target: "evm", "{}", data);
        }
    }

    pub struct EvmInformant {
        spacing: String,
        last_instruction: Instant,
        stats: HashMap<Instruction, Stats>,
    }

    impl EvmInformant {
        fn color(instruction: Instruction, name: &str) -> String {
            let c = instruction as usize % 6;
            let colors = [31, 34, 33, 32, 35, 36];
            format!("\x1B[1;{}m{}\x1B[0m", colors[c], name)
        }

        fn as_micro(duration: &Duration) -> u64 {
            let mut sec = duration.as_secs();
            let subsec = duration.subsec_nanos() as u64;
            sec = sec.saturating_mul(1_000_000u64);
            sec += subsec / 1_000;
            sec
        }

        pub fn new(depth: usize) -> Self {
            EvmInformant {
                spacing: iter::repeat(".").take(depth).collect(),
                last_instruction: Instant::now(),
                stats: HashMap::new(),
            }
        }

        pub fn before_instruction<Cost: CostType>(
            &mut self,
            pc: usize,
            instruction: Instruction,
            info: &InstructionInfo,
            current_gas: &Cost,
            stack: &Stack<U256>,
        ) {
            let time = self.last_instruction.elapsed();
            self.last_instruction = Instant::now();

            print(format!(
                "{}[0x{:<3x}][{:>19}(0x{:<2x}) Gas Left: {:6?} (Previous took: {:10}μs)",
                &self.spacing,
                pc,
                Self::color(instruction, info.name),
                instruction as u8,
                current_gas,
                Self::as_micro(&time),
            ));

            if info.args > 0 {
                for (idx, item) in stack.peek_top(info.args).iter().enumerate() {
                    print(format!("{} |{:2}: {:?}", self.spacing, idx, item));
                }
            }
        }

        pub fn after_instruction(&mut self, instruction: Instruction) {
            let stats = self
                .stats
                .entry(instruction)
                .or_insert_with(|| Stats::default());
            let took = self.last_instruction.elapsed();
            stats.note(took);
        }

        pub fn done(&mut self) {
            // Print out stats
            let mut stats: Vec<(_, _)> = self.stats.drain().collect();
            stats.sort_by(|ref a, ref b| b.1.avg().cmp(&a.1.avg()));

            print(format!("\n{}-------OPCODE STATS:", self.spacing));
            for (instruction, stats) in stats.into_iter() {
                let info = instruction.info();
                print(format!(
                    "{}-------{:>19}(0x{:<2x}) count: {:4}, avg: {:10}μs",
                    self.spacing,
                    Self::color(instruction, info.name),
                    instruction as u8,
                    stats.count,
                    stats.avg(),
                ));
            }
        }
    }

    struct Stats {
        count: u64,
        total_duration: Duration,
    }

    impl Default for Stats {
        fn default() -> Self {
            Stats {
                count: 0,
                total_duration: Duration::from_secs(0),
            }
        }
    }

    impl Stats {
        fn note(&mut self, took: Duration) {
            self.count += 1;
            self.total_duration += took;
        }

        fn avg(&self) -> u64 {
            EvmInformant::as_micro(&self.total_duration) / self.count
        }
    }
}

View File

@ -20,172 +20,175 @@ use vm::ReturnData;
const MAX_RETURN_WASTE_BYTES: usize = 16384;
pub trait Memory {
    /// Retrieve current size of the memory
    fn size(&self) -> usize;
    /// Resize (shrink or expand) the memory to specified size (fills 0)
    fn resize(&mut self, new_size: usize);
    /// Resize the memory only if its smaller
    fn expand(&mut self, new_size: usize);
    /// Write single byte to memory
    fn write_byte(&mut self, offset: U256, value: U256);
    /// Write a word to memory. Does not resize memory!
    fn write(&mut self, offset: U256, value: U256);
    /// Read a word from memory
    fn read(&self, offset: U256) -> U256;
    /// Write slice of bytes to memory. Does not resize memory!
    fn write_slice(&mut self, offset: U256, &[u8]);
    /// Retrieve part of the memory between offset and offset + size
    fn read_slice(&self, offset: U256, size: U256) -> &[u8];
    /// Retrieve writeable part of memory
    fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8];
    /// Convert memory into return data.
    fn into_return_data(self, offset: U256, size: U256) -> ReturnData;
}

/// Checks whether offset and size is valid memory range
pub fn is_valid_range(off: usize, size: usize) -> bool {
    // When size is zero we haven't actually expanded the memory
    let overflow = off.overflowing_add(size).1;
    size > 0 && !overflow
}
impl Memory for Vec<u8> {
    fn size(&self) -> usize {
        self.len()
    }

    fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] {
        let off = init_off_u.low_u64() as usize;
        let size = init_size_u.low_u64() as usize;
        if !is_valid_range(off, size) {
            &self[0..0]
        } else {
            &self[off..off + size]
        }
    }

    fn read(&self, offset: U256) -> U256 {
        let off = offset.low_u64() as usize;
        U256::from(&self[off..off + 32])
    }

    fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] {
        let off = offset.low_u64() as usize;
        let s = size.low_u64() as usize;
        if !is_valid_range(off, s) {
            &mut self[0..0]
        } else {
            &mut self[off..off + s]
        }
    }

    fn write_slice(&mut self, offset: U256, slice: &[u8]) {
        if !slice.is_empty() {
            let off = offset.low_u64() as usize;
            self[off..off + slice.len()].copy_from_slice(slice);
        }
    }

    fn write(&mut self, offset: U256, value: U256) {
        let off = offset.low_u64() as usize;
        value.to_big_endian(&mut self[off..off + 32]);
    }

    fn write_byte(&mut self, offset: U256, value: U256) {
        let off = offset.low_u64() as usize;
        let val = value.low_u64() as u64;
        self[off] = val as u8;
    }

    fn resize(&mut self, new_size: usize) {
        self.resize(new_size, 0);
    }

    fn expand(&mut self, size: usize) {
        if size > self.len() {
            Memory::resize(self, size)
        }
    }

    fn into_return_data(mut self, offset: U256, size: U256) -> ReturnData {
        let mut offset = offset.low_u64() as usize;
        let size = size.low_u64() as usize;

        if !is_valid_range(offset, size) {
            return ReturnData::empty();
        }

        if self.len() - size > MAX_RETURN_WASTE_BYTES {
            if offset == 0 {
                self.truncate(size);
                self.shrink_to_fit();
            } else {
                self = self[offset..(offset + size)].to_vec();
                offset = 0;
            }
        }

        ReturnData::new(self, offset, size)
    }
}
#[cfg(test)]
mod tests {
    use super::Memory;
    use ethereum_types::U256;

    #[test]
    fn test_memory_read_and_write() {
        // given
        let mem: &mut Memory = &mut vec![];
        mem.resize(0x80 + 32);

        // when
        mem.write(U256::from(0x80), U256::from(0xabcdef));

        // then
        assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef));
    }

    #[test]
    fn test_memory_read_and_write_byte() {
        // given
        let mem: &mut Memory = &mut vec![];
        mem.resize(32);

        // when
        mem.write_byte(U256::from(0x1d), U256::from(0xab));
        mem.write_byte(U256::from(0x1e), U256::from(0xcd));
        mem.write_byte(U256::from(0x1f), U256::from(0xef));

        // then
        assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef));
    }

    #[test]
    fn test_memory_read_slice_and_write_slice() {
        let mem: &mut Memory = &mut vec![];
        mem.resize(32);

        {
            let slice = "abcdefghijklmnopqrstuvwxyz012345".as_bytes();
            mem.write_slice(U256::from(0), slice);

            assert_eq!(mem.read_slice(U256::from(0), U256::from(32)), slice);
        }

        // write again
        {
            let slice = "67890".as_bytes();
            mem.write_slice(U256::from(0x1), slice);

            assert_eq!(
                mem.read_slice(U256::from(0), U256::from(7)),
                "a67890g".as_bytes()
            );
        }

        // write empty slice out of bounds
        {
            let slice = [];
            mem.write_slice(U256::from(0x1000), &slice);
            assert_eq!(mem.size(), 32);
        }
    }
}

File diff suppressed because it is too large

View File

@ -14,14 +14,14 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use super::super::instructions::{self, Instruction};
use bit_set::BitSet;
use ethereum_types::H256;
use hash::KECCAK_EMPTY;
use heapsize::HeapSizeOf;
use memory_cache::MemoryLruCache;
use parking_lot::Mutex;
use std::sync::Arc;
const DEFAULT_CACHE_SIZE: usize = 4 * 1024 * 1024;
@ -29,84 +29,86 @@ const DEFAULT_CACHE_SIZE: usize = 4 * 1024 * 1024;
struct Bits(Arc<BitSet>);

impl HeapSizeOf for Bits {
    fn heap_size_of_children(&self) -> usize {
        // dealing in bits here
        self.0.capacity() * 8
    }
}

/// Global cache for EVM interpreter
pub struct SharedCache {
    jump_destinations: Mutex<MemoryLruCache<H256, Bits>>,
}

impl SharedCache {
    /// Create a jump destinations cache with a maximum size in bytes
    /// to cache.
    pub fn new(max_size: usize) -> Self {
        SharedCache {
            jump_destinations: Mutex::new(MemoryLruCache::new(max_size)),
        }
    }

    /// Get jump destinations bitmap for a contract.
    pub fn jump_destinations(&self, code_hash: &Option<H256>, code: &[u8]) -> Arc<BitSet> {
        if let Some(ref code_hash) = code_hash {
            if code_hash == &KECCAK_EMPTY {
                return Self::find_jump_destinations(code);
            }

            if let Some(d) = self.jump_destinations.lock().get_mut(code_hash) {
                return d.0.clone();
            }
        }

        let d = Self::find_jump_destinations(code);

        if let Some(ref code_hash) = code_hash {
            self.jump_destinations
                .lock()
                .insert(*code_hash, Bits(d.clone()));
        }

        d
    }

    fn find_jump_destinations(code: &[u8]) -> Arc<BitSet> {
        let mut jump_dests = BitSet::with_capacity(code.len());
        let mut position = 0;

        while position < code.len() {
            let instruction = Instruction::from_u8(code[position]);

            if let Some(instruction) = instruction {
                if instruction == instructions::JUMPDEST {
                    jump_dests.insert(position);
                } else if let Some(push_bytes) = instruction.push_bytes() {
                    position += push_bytes;
                }
            }
            position += 1;
        }

        jump_dests.shrink_to_fit();
        Arc::new(jump_dests)
    }
}

impl Default for SharedCache {
    fn default() -> Self {
        SharedCache::new(DEFAULT_CACHE_SIZE)
    }
}

#[test]
fn test_find_jump_destinations() {
    use rustc_hex::FromHex;

    // given
    let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap();

    // when
    let valid_jump_destinations = SharedCache::find_jump_destinations(&code);

    // then
    assert!(valid_jump_destinations.contains(66));
}
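// Sketch of why `find_jump_destinations` advances past PUSH data: a 0x5b byte
// inside a PUSH immediate must not be treated as a JUMPDEST. The hand-rolled
// scanner below mirrors that rule for the PUSH1..PUSH32 range (opcodes
// 0x60..=0x7f) without using the crate's `Instruction` type; illustrative only.
#[cfg(test)]
mod jumpdest_sketch {
    fn jump_dests(code: &[u8]) -> Vec<usize> {
        let mut dests = Vec::new();
        let mut i = 0;
        while i < code.len() {
            let op = code[i];
            if op == 0x5b {
                dests.push(i);
            } else if (0x60..=0x7f).contains(&op) {
                i += (op - 0x5f) as usize; // skip the PUSH immediate bytes
            }
            i += 1;
        }
        dests
    }

    #[test]
    fn jumpdest_inside_push_data_is_ignored() {
        // PUSH1 0x5b; JUMPDEST
        assert_eq!(jump_dests(&[0x60, 0x5b, 0x5b]), vec![2]);
    }
}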

View File

@ -14,80 +14,85 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use instructions;
use std::fmt;

/// Stack trait with VM-friendly API
pub trait Stack<T> {
    /// Returns `Stack[len(Stack) - no_from_top]`
    fn peek(&self, no_from_top: usize) -> &T;
    /// Swaps Stack[len(Stack)] and Stack[len(Stack) - no_from_top]
    fn swap_with_top(&mut self, no_from_top: usize);
    /// Returns true if Stack has at least `no_of_elems` elements
    fn has(&self, no_of_elems: usize) -> bool;
    /// Get element from top and remove it from Stack. Panics if stack is empty.
    fn pop_back(&mut self) -> T;
    /// Get (up to `instructions::MAX_NO_OF_TOPICS`) elements from top and remove them from Stack. Panics if stack is empty.
    fn pop_n(&mut self, no_of_elems: usize) -> &[T];
    /// Add element on top of the Stack
    fn push(&mut self, elem: T);
    /// Get number of elements on Stack
    fn size(&self) -> usize;
    /// Returns all data on stack.
    fn peek_top(&self, no_of_elems: usize) -> &[T];
}
pub struct VecStack<S> {
    stack: Vec<S>,
    logs: [S; instructions::MAX_NO_OF_TOPICS],
}

impl<S: Copy> VecStack<S> {
    pub fn with_capacity(capacity: usize, zero: S) -> Self {
        VecStack {
            stack: Vec::with_capacity(capacity),
            logs: [zero; instructions::MAX_NO_OF_TOPICS],
        }
    }
}
impl<S: fmt::Display> Stack<S> for VecStack<S> {
    fn peek(&self, no_from_top: usize) -> &S {
        &self.stack[self.stack.len() - no_from_top - 1]
    }

    fn swap_with_top(&mut self, no_from_top: usize) {
        let len = self.stack.len();
        self.stack.swap(len - no_from_top - 1, len - 1);
    }

    fn has(&self, no_of_elems: usize) -> bool {
        self.stack.len() >= no_of_elems
    }

    fn pop_back(&mut self) -> S {
        self.stack
            .pop()
            .expect("instruction validation prevents from popping too many items; qed")
    }

    fn pop_n(&mut self, no_of_elems: usize) -> &[S] {
        assert!(no_of_elems <= instructions::MAX_NO_OF_TOPICS);

        for i in 0..no_of_elems {
            self.logs[i] = self.pop_back();
        }
        &self.logs[0..no_of_elems]
    }

    fn push(&mut self, elem: S) {
        self.stack.push(elem);
    }

    fn size(&self) -> usize {
        self.stack.len()
    }

    fn peek_top(&self, no_from_top: usize) -> &[S] {
        assert!(
            self.stack.len() >= no_from_top,
            "peek_top asked for more items than exist."
        );
        &self.stack[self.stack.len() - no_from_top..self.stack.len()]
    }
}
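// A short usage sketch of `VecStack`: instruction validation elsewhere in the
// interpreter guarantees enough items are on the stack before `pop_back` or
// `peek` are called, which is why those methods are allowed to panic. The
// element type u64 here is an arbitrary choice for illustration.
#[cfg(test)]
mod vecstack_sketch {
    use super::{Stack, VecStack};

    #[test]
    fn push_peek_and_swap() {
        let mut stack = VecStack::with_capacity(16, 0u64);
        stack.push(1);
        stack.push(2);
        stack.push(3);

        assert_eq!(*stack.peek(0), 3); // top of the stack
        assert_eq!(*stack.peek(2), 1); // bottom-most of the three

        stack.swap_with_top(2);
        assert_eq!(*stack.peek(0), 1);
        assert_eq!(stack.pop_back(), 1);
        assert_eq!(stack.size(), 2);
    }
}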

Some files were not shown because too many files have changed in this diff